From 7866c319d00734f10732fd493d60512500b90317 Mon Sep 17 00:00:00 2001
From: Paul Dittamo <37558497+pvditt@users.noreply.github.com>
Date: Wed, 21 Aug 2024 09:08:09 -0700
Subject: [PATCH 1/8] [Bug] Update resource failures w/ Finalizers set (#423) (#5673)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

## Overview

When the [informer cache has stale values](https://github.com/unionai/flyte/blob/1e82352dd95f89630e333fe6105d5fdb5487a24e/flytepropeller/pkg/controller/nodes/task/k8s/plugin_manager.go#L478), we cannot update the k8s resource when [clearing finalizers](https://github.com/unionai/flyte/blob/1e82352dd95f89630e333fe6105d5fdb5487a24e/flytepropeller/pkg/controller/nodes/task/k8s/plugin_manager.go#L450) and get `Error: Operation cannot be fulfilled on pods.` The current implementation bubbles up the error, resulting in a system retry. By the next loop, the informer cache is up to date and the update can be applied. However, in an ArrayNode with many subnodes executing in parallel, the execution can easily run out of retries. This update adds a basic retry with exponential backoff to give the informer cache time to become up to date.

## Test Plan

Ran in dogfood-gcp - https://buildkite.com/unionai/managed-cluster-staging-sync/builds/4622 + manually updated the configmap to enable finalizers
- Run without change (https://dogfood-gcp.cloud-staging.union.ai/console/projects/flytesnacks/domains/development/executions/fd16ac81fd7b5480fb6f/nodes)
- Run with change (https://dogfood-gcp.cloud-staging.union.ai/console/projects/flytesnacks/domains/development/executions/f016a3be7fa304db5a77/nodeId/n0/nodes)

Confirmed in the logs that conflict errors:
```
{"json":{"exec_id":"f016a3be7fa304db5a77","node":"n0/n42","ns":"development","res_ver":"146129599","routine":"worker-66","src":"plugin_manager.go:455","wf":"flytesnacks:development:tests.flytekit.integration.map_task_issue.wf8"},"level":"warning","msg":"Failed to clear finalizers for Resource with name: development/f016a3be7fa304db5a77-n0-0-n42-0. Error: Operation cannot be fulfilled on pods \"f016a3be7fa304db5a77-n0-0-n42-0\": the object has been modified; please apply your changes to the latest version and try again","ts":"2024-08-17T02:02:48Z"}
```
did not bubble up, and confirmed that the finalizers were removed:
```
➜  ~ k get pods -n development f016a3be7fa304db5a77-n0-0-n42-0 -o json | grep -i final
INFO[0000] [0] Couldn't find a config file []. Relying on env vars and pflags.
➜  ~
```

## Rollout Plan (if applicable)

- revert changes to the customer's config that disabled finalizers

## Upstream Changes

Should this change be upstreamed to OSS (flyteorg/flyte)? If not, please uncheck this box, which is used for auditing. Note, it is the responsibility of each developer to actually upstream their changes. See [this guide](https://unionai.atlassian.net/wiki/spaces/ENG/pages/447610883/Flyte+-+Union+Cloud+Development+Runbook/#When-are-versions-updated%3F).
- [x] To be upstreamed to OSS ## Issue fixes: https://linear.app/unionai/issue/COR-1558/investigate-why-finalizers-consume-system-retries-in-map-tasks ## Checklist * [ ] Added tests * [x] Ran a deploy dry run and shared the terraform plan * [ ] Added logging and metrics * [ ] Updated [dashboards](https://unionai.grafana.net/dashboards) and [alerts](https://unionai.grafana.net/alerting/list) * [ ] Updated documentation Signed-off-by: Paul Dittamo --- .../pluginmachinery/flytek8s/config/config.go | 8 ++ .../flytek8s/config/k8spluginconfig_flags.go | 2 + .../config/k8spluginconfig_flags_test.go | 28 +++++++ .../nodes/task/k8s/plugin_manager.go | 79 +++++++++++++------ 4 files changed, 92 insertions(+), 25 deletions(-) diff --git a/flyteplugins/go/tasks/pluginmachinery/flytek8s/config/config.go b/flyteplugins/go/tasks/pluginmachinery/flytek8s/config/config.go index 109ef06ba1..eb19015586 100644 --- a/flyteplugins/go/tasks/pluginmachinery/flytek8s/config/config.go +++ b/flyteplugins/go/tasks/pluginmachinery/flytek8s/config/config.go @@ -64,6 +64,8 @@ var ( DefaultPodTemplateResync: config2.Duration{ Duration: 30 * time.Second, }, + UpdateBaseBackoffDuration: 10, + UpdateBackoffRetries: 5, } // K8sPluginConfigSection provides a singular top level config section for all plugins. @@ -206,6 +208,12 @@ type K8sPluginConfig struct { // SendObjectEvents indicates whether to send k8s object events in TaskExecutionEvent updates (similar to kubectl get events). SendObjectEvents bool `json:"send-object-events" pflag:",If true, will send k8s object events in TaskExecutionEvent updates."` + + // Initial delay in exponential backoff when updating a resource in milliseconds. + UpdateBaseBackoffDuration int `json:"update-base-backoff-duration" pflag:",Initial delay in exponential backoff when updating a resource in milliseconds."` + + // Number of retries for exponential backoff when updating a resource. + UpdateBackoffRetries int `json:"update-backoff-retries" pflag:",Number of retries for exponential backoff when updating a resource."` } // FlyteCoPilotConfig specifies configuration for the Flyte CoPilot system. 
FlyteCoPilot, allows running flytekit-less containers diff --git a/flyteplugins/go/tasks/pluginmachinery/flytek8s/config/k8spluginconfig_flags.go b/flyteplugins/go/tasks/pluginmachinery/flytek8s/config/k8spluginconfig_flags.go index 7a3f1c951e..4652d0bfd4 100755 --- a/flyteplugins/go/tasks/pluginmachinery/flytek8s/config/k8spluginconfig_flags.go +++ b/flyteplugins/go/tasks/pluginmachinery/flytek8s/config/k8spluginconfig_flags.go @@ -67,5 +67,7 @@ func (cfg K8sPluginConfig) GetPFlagSet(prefix string) *pflag.FlagSet { cmdFlags.String(fmt.Sprintf("%v%v", prefix, "default-pod-template-name"), defaultK8sConfig.DefaultPodTemplateName, "Name of the PodTemplate to use as the base for all k8s pods created by FlytePropeller.") cmdFlags.String(fmt.Sprintf("%v%v", prefix, "default-pod-template-resync"), defaultK8sConfig.DefaultPodTemplateResync.String(), "Frequency of resyncing default pod templates") cmdFlags.Bool(fmt.Sprintf("%v%v", prefix, "send-object-events"), defaultK8sConfig.SendObjectEvents, "If true, will send k8s object events in TaskExecutionEvent updates.") + cmdFlags.Int(fmt.Sprintf("%v%v", prefix, "update-base-backoff-duration"), defaultK8sConfig.UpdateBaseBackoffDuration, "Initial delay in exponential backoff when updating a resource in milliseconds.") + cmdFlags.Int(fmt.Sprintf("%v%v", prefix, "update-backoff-retries"), defaultK8sConfig.UpdateBackoffRetries, "Number of retries for exponential backoff when updating a resource.") return cmdFlags } diff --git a/flyteplugins/go/tasks/pluginmachinery/flytek8s/config/k8spluginconfig_flags_test.go b/flyteplugins/go/tasks/pluginmachinery/flytek8s/config/k8spluginconfig_flags_test.go index 4d5918a3b5..cc46ffa466 100755 --- a/flyteplugins/go/tasks/pluginmachinery/flytek8s/config/k8spluginconfig_flags_test.go +++ b/flyteplugins/go/tasks/pluginmachinery/flytek8s/config/k8spluginconfig_flags_test.go @@ -337,4 +337,32 @@ func TestK8sPluginConfig_SetFlags(t *testing.T) { } }) }) + t.Run("Test_update-base-backoff-duration", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("update-base-backoff-duration", testValue) + if vInt, err := cmdFlags.GetInt("update-base-backoff-duration"); err == nil { + testDecodeJson_K8sPluginConfig(t, fmt.Sprintf("%v", vInt), &actual.UpdateBaseBackoffDuration) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_update-backoff-retries", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("update-backoff-retries", testValue) + if vInt, err := cmdFlags.GetInt("update-backoff-retries"); err == nil { + testDecodeJson_K8sPluginConfig(t, fmt.Sprintf("%v", vInt), &actual.UpdateBackoffRetries) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) } diff --git a/flytepropeller/pkg/controller/nodes/task/k8s/plugin_manager.go b/flytepropeller/pkg/controller/nodes/task/k8s/plugin_manager.go index f9c3806ee6..42d3ad9b85 100644 --- a/flytepropeller/pkg/controller/nodes/task/k8s/plugin_manager.go +++ b/flytepropeller/pkg/controller/nodes/task/k8s/plugin_manager.go @@ -13,6 +13,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" k8stypes "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/util/workqueue" @@ -92,9 +93,11 @@ type PluginManager struct { kubeClient pluginsCore.KubeClient metrics PluginMetrics // Per namespace-resource - backOffController *backoff.Controller - 
resourceLevelMonitor *ResourceLevelMonitor - eventWatcher EventWatcher + backOffController *backoff.Controller + resourceLevelMonitor *ResourceLevelMonitor + eventWatcher EventWatcher + updateBaseBackoffDuration int + updateBackoffRetries int } func (e *PluginManager) addObjectMetadata(taskCtx pluginsCore.TaskExecutionMetadata, o client.Object, cfg *config.K8sPluginConfig) { @@ -463,25 +466,48 @@ func (e *PluginManager) Finalize(ctx context.Context, tCtx pluginsCore.TaskExecu } nsName = k8stypes.NamespacedName{Namespace: o.GetNamespace(), Name: o.GetName()} + retryBackoff := wait.Backoff{ + Duration: time.Duration(e.updateBaseBackoffDuration) * time.Millisecond, + Factor: 2.0, + Jitter: 0.1, + Steps: e.updateBackoffRetries, + } + // Attempt to cleanup finalizers so that the object may be deleted/garbage collected. We try to clear them for all // objects, regardless of whether or not InjectFinalizer is configured to handle all cases where InjectFinalizer is // enabled/disabled during object execution. - if err := e.kubeClient.GetClient().Get(ctx, nsName, o); err != nil { - if isK8sObjectNotExists(err) { - return nil + var lastErr error + _ = wait.ExponentialBackoff(retryBackoff, func() (bool, error) { + lastErr = nil + if err := e.kubeClient.GetClient().Get(ctx, nsName, o); err != nil { + if isK8sObjectNotExists(err) { + return true, nil + } + lastErr = err + // This happens sometimes because a node gets removed and K8s deletes the pod. This will result in a + // Pod does not exist error. This should be retried using the retry policy + logger.Warningf(ctx, "Failed in finalizing get Resource with name: %v. Error: %v", nsName, err) + return true, err } - // This happens sometimes because a node gets removed and K8s deletes the pod. This will result in a - // Pod does not exist error. This should be retried using the retry policy - logger.Warningf(ctx, "Failed in finalizing get Resource with name: %v. Error: %v", nsName, err) - return err - } - // This must happen after sending admin event. It's safe against partial failures because if the event failed, we will - // simply retry in the next round. If the event succeeded but this failed, we will try again the next round to send - // the same event (idempotent) and then come here again... - err = e.clearFinalizers(ctx, o) - if err != nil { - errs.Append(err) + // This must happen after sending admin event. It's safe against partial failures because if the event failed, we will + // simply retry in the next round. If the event succeeded but this failed, we will try again the next round to send + // the same event (idempotent) and then come here again... + if err := e.clearFinalizers(ctx, o); err != nil { + lastErr = err + // retry is if there is a conflict in case the informer cache is out of sync + if k8serrors.IsConflict(err) { + logger.Warningf(ctx, "Failed to clear finalizers for Resource with name: %v. Error: %v. Retrying..", nsName, err) + return false, nil + } + logger.Warningf(ctx, "Failed to clear finalizers for Resource with name: %v. Error: %v", nsName, err) + return true, err + } + return true, nil + }) + + if lastErr != nil { + errs.Append(lastErr) } // If we should delete the resource when finalize is called, do a best effort delete. 
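For illustration, a minimal, self-contained sketch of the retry pattern added above, outside the plugin-manager plumbing (`clearFinalizersWithRetry` and its callback are names invented for this sketch; the constants mirror the `update-base-backoff-duration: 10` and `update-backoff-retries: 5` defaults, i.e. pre-jitter sleeps of roughly 10, 20, 40, and 80 ms, around 150 ms of added wait in the worst case):

```go
package main

import (
	"context"
	"fmt"
	"time"

	k8serrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/util/wait"
)

// clearFinalizersWithRetry retries the update with exponential backoff, but
// only while the API server reports a conflict ("the object has been
// modified"), which is the symptom of a stale informer cache.
func clearFinalizersWithRetry(ctx context.Context, clearFinalizers func(context.Context) error) error {
	retryBackoff := wait.Backoff{
		Duration: 10 * time.Millisecond, // update-base-backoff-duration default
		Factor:   2.0,
		Jitter:   0.1,
		Steps:    5, // update-backoff-retries default
	}

	var lastErr error
	_ = wait.ExponentialBackoff(retryBackoff, func() (bool, error) {
		lastErr = clearFinalizers(ctx)
		switch {
		case lastErr == nil:
			return true, nil // success: stop retrying
		case k8serrors.IsConflict(lastErr):
			return false, nil // stale cache: back off and try again
		default:
			return true, lastErr // other errors are not retried here
		}
	})
	return lastErr
}

func main() {
	attempts := 0
	// Hypothetical callback that conflicts twice before the cache catches up.
	err := clearFinalizersWithRetry(context.Background(), func(context.Context) error {
		attempts++
		if attempts < 3 {
			return k8serrors.NewConflict(
				schema.GroupResource{Resource: "pods"}, "demo-pod",
				fmt.Errorf("the object has been modified"))
		}
		return nil
	})
	fmt.Printf("attempts=%d err=%v\n", attempts, err) // attempts=3 err=<nil>
}
```

Only conflict errors are retried; anything else still surfaces immediately, matching the hunk above.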
@@ -630,8 +656,9 @@ func NewPluginManager(ctx context.Context, iCtx pluginsCore.SetupContext, entry return nil, err } + k8sConfig := config.GetK8sPluginConfig() var eventWatcher EventWatcher - if config.GetK8sPluginConfig().SendObjectEvents { + if k8sConfig.SendObjectEvents { eventWatcher, err = NewEventWatcher(ctx, gvk, kubeClientset) if err != nil { return nil, err } @@ -645,13 +672,15 @@ func NewPluginManager(ctx context.Context, iCtx pluginsCore.SetupContext, entry rm.RunCollectorOnce(ctx) return &PluginManager{ - id: entry.ID, - plugin: entry.Plugin, - resourceToWatch: entry.ResourceToWatch, - metrics: newPluginMetrics(metricsScope), - kubeClient: kubeClient, - resourceLevelMonitor: rm, - eventWatcher: eventWatcher, + id: entry.ID, + plugin: entry.Plugin, + resourceToWatch: entry.ResourceToWatch, + metrics: newPluginMetrics(metricsScope), + kubeClient: kubeClient, + resourceLevelMonitor: rm, + eventWatcher: eventWatcher, + updateBaseBackoffDuration: k8sConfig.UpdateBaseBackoffDuration, + updateBackoffRetries: k8sConfig.UpdateBackoffRetries, }, nil }

From e9413c0ccb6672a0129e7e357bbf8a92ae0a3411 Mon Sep 17 00:00:00 2001
From: Paul Dittamo <37558497+pvditt@users.noreply.github.com>
Date: Wed, 21 Aug 2024 14:29:37 -0700
Subject: [PATCH 2/8] [BUG] array node eventing bump version (#5680)

* [BUG] add retries to handle array node eventing race condition (#421)

If there is an error updating a [FlyteWorkflow CRD](https://github.com/unionai/flyte/blob/6a7207c5345604a28a9d4e3699becff767f520f5/flytepropeller/pkg/controller/handler.go#L378), then the propeller streak ends without the CRD getting updated, and the in-memory copy of the FlyteWorkflow is not utilized on the next loop. [TaskPhaseVersion](https://github.com/flyteorg/flyte/blob/37b4e13ac4a3594ac63b7a35058f4b2220e51282/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/node_status.go#L239) is stored in the FlyteWorkflow and is incremented whenever node/subnode state is updated, to ensure that events are unique. If events stay in the same state and have the same TaskPhaseVersion, they either [get short-circuited and don't get emitted to admin](https://github.com/flyteorg/flyte/blob/37b4e13ac4a3594ac63b7a35058f4b2220e51282/flytepropeller/events/admin_eventsink.go#L59) or get returned as an [AlreadyExists error](https://github.com/flyteorg/flyte/blob/37b4e13ac4a3594ac63b7a35058f4b2220e51282/flyteadmin/pkg/manager/impl/task_execution_manager.go#L172), which is [handled in propeller so it does not bubble up as an error](https://github.com/flyteorg/flyte/blob/37b4e13ac4a3594ac63b7a35058f4b2220e51282/flytepropeller/pkg/controller/nodes/node_exec_context.go#L38).

We can run into issues with ArrayNode eventing when:
- the array node handler increments the task phase version from "0" to "1"
- the admin event sink emits the event with version "1"
- the propeller controller is not able to update the FlyteWorkflow CRD, so the ArrayNodeStatus indicates the taskPhaseVersion is still 0
- on the next loop, the array node handler increments the task phase version from "0" to "1" again
- the admin event sink prevents the event from getting emitted, as an event with the same ID has already been received. No error is bubbled up.

This means we lose subnode state until there is an event that contains an update to that subnode. If the lost state is the subnode reaching a terminal state, then the subnode state (from admin/UI) is "stuck" in a non-terminal state.

I confirmed this to be an issue in the load-test-cluster. Whenever there was an [error syncing the FlyteWorkflow](https://github.com/flyteorg/flyte/blob/37b4e13ac4a3594ac63b7a35058f4b2220e51282/flytepropeller/pkg/controller/workers.go#L91), the next round of eventing in ArrayNode would fail unless the ArrayNode phase changed.
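To make the fix concrete, here is a simplified sketch of the retry-and-bump loop the patch adds to the ArrayNode handler (the names and signatures are invented for the sketch; `errAlreadyExists` stands in for the `eventsErr.IsAlreadyExists` check used in the real handler):

```go
package main

import (
	"errors"
	"fmt"
)

// errAlreadyExists stands in for the AlreadyExists event error from
// flytepropeller/events/errors.
var errAlreadyExists = errors.New("event already exists")

// finalizeWithVersionBump mirrors the idea in the patch: if admin reports the
// event ID (phase + task phase version) as already seen, bump the version and
// re-emit, so subnode state updates are not silently dropped.
func finalizeWithVersionBump(taskPhaseVersion *uint32, emit func(version uint32) error) error {
	const maxRetries = 3
	for retries := 0; ; retries++ {
		err := emit(*taskPhaseVersion)
		if err == nil {
			return nil
		}
		if !errors.Is(err, errAlreadyExists) || retries >= maxRetries {
			return err
		}
		*taskPhaseVersion++ // a stale CRD reused an old version; try the next one
	}
}

func main() {
	version := uint32(1)
	err := finalizeWithVersionBump(&version, func(v uint32) error {
		if v <= 2 { // pretend admin already saw events with versions 1 and 2
			return fmt.Errorf("emit: %w", errAlreadyExists)
		}
		return nil
	})
	fmt.Println(version, err) // 3 <nil>
}
```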
- added unit test
- tested locally in sandbox
- test in dogfood - https://buildkite.com/unionai/managed-cluster-staging-sync/builds/4398#01914a1a-f6d6-42a5-b41b-7b6807f27370
- should be fine to roll out to prod

Should this change be upstreamed to OSS (flyteorg/flyte)? If not, please uncheck this box, which is used for auditing. Note, it is the responsibility of each developer to actually upstream their changes. See [this guide](https://unionai.atlassian.net/wiki/spaces/ENG/pages/447610883/Flyte+-+Union+Cloud+Development+Runbook/#When-are-versions-updated%3F).

- [x] To be upstreamed to OSS

fixes: https://linear.app/unionai/issue/COR-1534/bug-arraynode-shows-non-complete-jobs-in-ui-when-the-job-is-actually

* [x] Added tests
* [x] Ran a deploy dry run and shared the terraform plan
* [ ] Added logging and metrics
* [ ] Updated [dashboards](https://unionai.grafana.net/dashboards) and [alerts](https://unionai.grafana.net/alerting/list)
* [ ] Updated documentation

Signed-off-by: Paul Dittamo

* handle already exists error on array node abort (#427)

* handle already exists error on array node abort

Signed-off-by: Paul Dittamo

* update comment

Signed-off-by: Paul Dittamo

---------

Signed-off-by: Paul Dittamo

* [BUG] set cause for already exists EventError (#432)

* set cause for already exists EventError

Signed-off-by: Paul Dittamo

* add nil check event error

Signed-off-by: Paul Dittamo

* lint

Signed-off-by: Paul Dittamo

---------

Signed-off-by: Paul Dittamo

---------

Signed-off-by: Paul Dittamo
---
 flytepropeller/events/admin_eventsink.go | 6 +- flytepropeller/events/admin_eventsink_test.go | 9 +- flytepropeller/events/errors/errors.go | 6 +- .../pkg/controller/config/config.go | 1 + .../pkg/controller/config/config_flags.go | 1 + .../controller/config/config_flags_test.go | 14 ++ .../pkg/controller/nodes/array/handler.go | 51 +++++- .../controller/nodes/array/handler_test.go | 163 ++++++++++++++++-- .../pkg/controller/nodes/node_exec_context.go | 3 + 9 files changed, 225 insertions(+), 29 deletions(-) diff --git a/flytepropeller/events/admin_eventsink.go b/flytepropeller/events/admin_eventsink.go index cb4b88a69a..3da6cca421 100644 --- a/flytepropeller/events/admin_eventsink.go +++ b/flytepropeller/events/admin_eventsink.go @@ -57,7 +57,11 @@ func (s *adminEventSink) Sink(ctx context.Context, message proto.Message) error if s.filter.Contains(ctx, id) { logger.Debugf(ctx, "event '%s' has already been sent", string(id)) - return nil + return &errors.EventError{ + Code: errors.AlreadyExists, + Cause: fmt.Errorf("event has already been sent"), + Message: "Event Already Exists", + } } // Validate submission with rate limiter and send admin event diff --git a/flytepropeller/events/admin_eventsink_test.go b/flytepropeller/events/admin_eventsink_test.go index c13b7ad47f..510371d056 100644 --- a/flytepropeller/events/admin_eventsink_test.go +++ b/flytepropeller/events/admin_eventsink_test.go @@ -184,13 +184,16 @@ func TestAdminFilterContains(t *testing.T) { filter.OnContainsMatch(mock.Anything, mock.Anything).Return(true) wfErr := adminEventSink.Sink(ctx, wfEvent) - assert.NoError(t, wfErr) + assert.Error(t, wfErr) + assert.True(t, errors.IsAlreadyExists(wfErr)) nodeErr := adminEventSink.Sink(ctx, nodeEvent) - assert.NoError(t, nodeErr) +
assert.Error(t, nodeErr) + assert.True(t, errors.IsAlreadyExists(nodeErr)) taskErr := adminEventSink.Sink(ctx, taskEvent) - assert.NoError(t, taskErr) + assert.Error(t, taskErr) + assert.True(t, errors.IsAlreadyExists(taskErr)) } func TestIDFromMessage(t *testing.T) { diff --git a/flytepropeller/events/errors/errors.go b/flytepropeller/events/errors/errors.go index 879b8b07d7..2d3e02e0df 100644 --- a/flytepropeller/events/errors/errors.go +++ b/flytepropeller/events/errors/errors.go @@ -33,7 +33,11 @@ type EventError struct { } func (r EventError) Error() string { - return fmt.Sprintf("%s: %s, caused by [%s]", r.Code, r.Message, r.Cause.Error()) + var cause string + if r.Cause != nil { + cause = r.Cause.Error() + } + return fmt.Sprintf("%s: %s, caused by [%s]", r.Code, r.Message, cause) } func (r *EventError) Is(target error) bool { diff --git a/flytepropeller/pkg/controller/config/config.go b/flytepropeller/pkg/controller/config/config.go index 419386eddd..f058212322 100644 --- a/flytepropeller/pkg/controller/config/config.go +++ b/flytepropeller/pkg/controller/config/config.go @@ -259,6 +259,7 @@ const ( type EventConfig struct { RawOutputPolicy RawOutputPolicy `json:"raw-output-policy" pflag:",How output data should be passed along in execution events."` FallbackToOutputReference bool `json:"fallback-to-output-reference" pflag:",Whether output data should be sent by reference when it is too large to be sent inline in execution events."` + ErrorOnAlreadyExists bool `json:"error-on-already-exists" pflag:",Whether to return an error when an event already exists."` } // ParallelismBehavior defines how ArrayNode should handle subNode parallelism by default diff --git a/flytepropeller/pkg/controller/config/config_flags.go b/flytepropeller/pkg/controller/config/config_flags.go index ea0b428c2f..d2dc0971ff 100755 --- a/flytepropeller/pkg/controller/config/config_flags.go +++ b/flytepropeller/pkg/controller/config/config_flags.go @@ -100,6 +100,7 @@ func (cfg Config) GetPFlagSet(prefix string) *pflag.FlagSet { cmdFlags.Int(fmt.Sprintf("%v%v", prefix, "max-streak-length"), defaultConfig.MaxStreakLength, "Maximum number of consecutive rounds that one propeller worker can use for one workflow - >1 => turbo-mode is enabled.") cmdFlags.String(fmt.Sprintf("%v%v", prefix, "event-config.raw-output-policy"), defaultConfig.EventConfig.RawOutputPolicy, "How output data should be passed along in execution events.") cmdFlags.Bool(fmt.Sprintf("%v%v", prefix, "event-config.fallback-to-output-reference"), defaultConfig.EventConfig.FallbackToOutputReference, "Whether output data should be sent by reference when it is too large to be sent inline in execution events.") + cmdFlags.Bool(fmt.Sprintf("%v%v", prefix, "event-config.error-on-already-exists"), defaultConfig.EventConfig.ErrorOnAlreadyExists, "Whether to return an error when an event already exists.") cmdFlags.StringSlice(fmt.Sprintf("%v%v", prefix, "include-shard-key-label"), defaultConfig.IncludeShardKeyLabel, "Include the specified shard key label in the k8s FlyteWorkflow CRD label selector") cmdFlags.StringSlice(fmt.Sprintf("%v%v", prefix, "exclude-shard-key-label"), defaultConfig.ExcludeShardKeyLabel, "Exclude the specified shard key label from the k8s FlyteWorkflow CRD label selector") cmdFlags.StringSlice(fmt.Sprintf("%v%v", prefix, "include-project-label"), defaultConfig.IncludeProjectLabel, "Include the specified project label in the k8s FlyteWorkflow CRD label selector") diff --git a/flytepropeller/pkg/controller/config/config_flags_test.go 
b/flytepropeller/pkg/controller/config/config_flags_test.go index bce7238f60..66a14381af 100755 --- a/flytepropeller/pkg/controller/config/config_flags_test.go +++ b/flytepropeller/pkg/controller/config/config_flags_test.go @@ -799,6 +799,20 @@ func TestConfig_SetFlags(t *testing.T) { } }) }) + t.Run("Test_event-config.error-on-already-exists", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("event-config.error-on-already-exists", testValue) + if vBool, err := cmdFlags.GetBool("event-config.error-on-already-exists"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vBool), &actual.EventConfig.ErrorOnAlreadyExists) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) t.Run("Test_include-shard-key-label", func(t *testing.T) { t.Run("Override", func(t *testing.T) { diff --git a/flytepropeller/pkg/controller/nodes/array/handler.go b/flytepropeller/pkg/controller/nodes/array/handler.go index 315041cb51..a101ed5a30 100644 --- a/flytepropeller/pkg/controller/nodes/array/handler.go +++ b/flytepropeller/pkg/controller/nodes/array/handler.go @@ -11,6 +11,7 @@ import ( "github.com/flyteorg/flyte/flyteplugins/go/tasks/pluginmachinery/ioutils" "github.com/flyteorg/flyte/flyteplugins/go/tasks/plugins/array/errorcollector" "github.com/flyteorg/flyte/flytepropeller/events" + eventsErr "github.com/flyteorg/flyte/flytepropeller/events/errors" "github.com/flyteorg/flyte/flytepropeller/pkg/apis/flyteworkflow/v1alpha1" "github.com/flyteorg/flyte/flytepropeller/pkg/compiler/validators" "github.com/flyteorg/flyte/flytepropeller/pkg/controller/config" @@ -21,6 +22,7 @@ import ( "github.com/flyteorg/flyte/flytepropeller/pkg/controller/nodes/interfaces" "github.com/flyteorg/flyte/flytepropeller/pkg/controller/nodes/task/k8s" "github.com/flyteorg/flyte/flytestdlib/bitarray" + stdConfig "github.com/flyteorg/flyte/flytestdlib/config" "github.com/flyteorg/flyte/flytestdlib/logger" "github.com/flyteorg/flyte/flytestdlib/promutils" "github.com/flyteorg/flyte/flytestdlib/storage" @@ -112,6 +114,10 @@ func (a *arrayNodeHandler) Abort(ctx context.Context, nCtx interfaces.NodeExecut // update state for subNodes if err := eventRecorder.finalize(ctx, nCtx, idlcore.TaskExecution_ABORTED, 0, a.eventConfig); err != nil { + // a task event with abort phase is already emitted when handling ArrayNodePhaseFailing + if eventsErr.IsAlreadyExists(err) { + return nil + } logger.Errorf(ctx, "ArrayNode event recording failed: [%s]", err.Error()) return err } @@ -579,12 +585,35 @@ func (a *arrayNodeHandler) Handle(ctx context.Context, nCtx interfaces.NodeExecu // increment taskPhaseVersion if we detect any changes in subNode state. 
if incrementTaskPhaseVersion { - arrayNodeState.TaskPhaseVersion = arrayNodeState.TaskPhaseVersion + 1 + arrayNodeState.TaskPhaseVersion++ } - if err := eventRecorder.finalize(ctx, nCtx, taskPhase, arrayNodeState.TaskPhaseVersion, a.eventConfig); err != nil { - logger.Errorf(ctx, "ArrayNode event recording failed: [%s]", err.Error()) - return handler.UnknownTransition, err + const maxRetries = 3 + retries := 0 + for retries <= maxRetries { + err := eventRecorder.finalize(ctx, nCtx, taskPhase, arrayNodeState.TaskPhaseVersion, a.eventConfig) + + if err == nil { + break + } + + // Handle potential race condition if FlyteWorkflow CRD fails to get synced + if eventsErr.IsAlreadyExists(err) { + if !incrementTaskPhaseVersion { + break + } + logger.Warnf(ctx, "Event version already exists, bumping version and retrying (%d/%d): [%s]", retries+1, maxRetries, err.Error()) + arrayNodeState.TaskPhaseVersion++ + } else { + logger.Errorf(ctx, "ArrayNode event recording failed: [%s]", err.Error()) + return handler.UnknownTransition, err + } + + retries++ + if retries > maxRetries { + logger.Errorf(ctx, "ArrayNode event recording failed after %d retries: [%s]", maxRetries, err.Error()) + return handler.UnknownTransition, err + } } // if the ArrayNode phase has changed we need to reset the taskPhaseVersion to 0 @@ -632,9 +661,21 @@ func New(nodeExecutor interfaces.Node, eventConfig *config.EventConfig, scope pr return nil, err } + eventConfigCopy, err := stdConfig.DeepCopyConfig(eventConfig) + if err != nil { + return nil, err + } + + deepCopiedEventConfig, ok := eventConfigCopy.(*config.EventConfig) + if !ok { + return nil, fmt.Errorf("deep copy error: expected *config.EventConfig, but got %T", eventConfigCopy) + } + + deepCopiedEventConfig.ErrorOnAlreadyExists = true + arrayScope := scope.NewSubScope("array") return &arrayNodeHandler{ - eventConfig: eventConfig, + eventConfig: deepCopiedEventConfig, gatherOutputsRequestChannel: make(chan *gatherOutputsRequest), metrics: newMetrics(arrayScope), nodeExecutionRequestChannel: make(chan *nodeExecutionRequest), diff --git a/flytepropeller/pkg/controller/nodes/array/handler_test.go b/flytepropeller/pkg/controller/nodes/array/handler_test.go index ee1fc5b80b..d27b412c1f 100644 --- a/flytepropeller/pkg/controller/nodes/array/handler_test.go +++ b/flytepropeller/pkg/controller/nodes/array/handler_test.go @@ -12,7 +12,8 @@ import ( idlcore "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/event" "github.com/flyteorg/flyte/flyteplugins/go/tasks/pluginmachinery/core" - pluginmocks "github.com/flyteorg/flyte/flyteplugins/go/tasks/pluginmachinery/io/mocks" + pluginiomocks "github.com/flyteorg/flyte/flyteplugins/go/tasks/pluginmachinery/io/mocks" + eventsErr "github.com/flyteorg/flyte/flytepropeller/events/errors" eventmocks "github.com/flyteorg/flyte/flytepropeller/events/mocks" "github.com/flyteorg/flyte/flytepropeller/pkg/apis/flyteworkflow/v1alpha1" "github.com/flyteorg/flyte/flytepropeller/pkg/controller/config" @@ -50,7 +51,7 @@ func createArrayNodeHandler(ctx context.Context, t *testing.T, nodeHandler inter // mock components adminClient := launchplan.NewFailFastLaunchPlanExecutor() enqueueWorkflowFunc := func(workflowID v1alpha1.WorkflowID) {} - eventConfig := &config.EventConfig{} + eventConfig := &config.EventConfig{ErrorOnAlreadyExists: true} mockEventSink := eventmocks.NewMockEventSink() mockHandlerFactory := &mocks.HandlerFactory{} 
mockHandlerFactory.OnGetHandlerMatch(mock.Anything).Return(nodeHandler, nil) @@ -135,7 +136,7 @@ func createNodeExecutionContext(dataStore *storage.DataStore, eventRecorder inte nCtx.OnEventsRecorder().Return(eventRecorder) // InputReader - inputFilePaths := &pluginmocks.InputFilePaths{} + inputFilePaths := &pluginiomocks.InputFilePaths{} inputFilePaths.OnGetInputPath().Return(storage.DataReference("s3://bucket/input")) nCtx.OnInputReader().Return( newStaticInputReader( @@ -459,6 +460,24 @@ func uint32Ptr(v uint32) *uint32 { return &v } +type fakeEventRecorder struct { + taskErr error + phaseVersionFailures uint32 + recordTaskEventCallCount int +} + +func (f *fakeEventRecorder) RecordNodeEvent(ctx context.Context, event *event.NodeExecutionEvent, eventConfig *config.EventConfig) error { + return nil +} + +func (f *fakeEventRecorder) RecordTaskEvent(ctx context.Context, event *event.TaskExecutionEvent, eventConfig *config.EventConfig) error { + f.recordTaskEventCallCount++ + if f.phaseVersionFailures == 0 || event.PhaseVersion < f.phaseVersionFailures { + return f.taskErr + } + return nil +} + func TestHandleArrayNodePhaseExecuting(t *testing.T) { ctx := context.Background() @@ -492,11 +511,18 @@ func TestHandleArrayNodePhaseExecuting(t *testing.T) { subNodeTaskPhases []core.Phase subNodeTransitions []handler.Transition expectedArrayNodePhase v1alpha1.ArrayNodePhase + expectedArrayNodeSubPhases []v1alpha1.NodePhase expectedTransitionPhase handler.EPhase expectedExternalResourcePhases []idlcore.TaskExecution_Phase currentWfParallelism uint32 maxWfParallelism uint32 incrementParallelismCount uint32 + useFakeEventRecorder bool + eventRecorderFailures uint32 + eventRecorderError error + expectedTaskPhaseVersion uint32 + expectHandleError bool + expectedEventingCalls int }{ { name: "StartAllSubNodes", @@ -514,6 +540,7 @@ func TestHandleArrayNodePhaseExecuting(t *testing.T) { handler.DoTransition(handler.TransitionTypeEphemeral, handler.PhaseInfoRunning(&handler.ExecutionInfo{})), }, expectedArrayNodePhase: v1alpha1.ArrayNodePhaseExecuting, + expectedTaskPhaseVersion: 1, expectedTransitionPhase: handler.EPhaseRunning, expectedExternalResourcePhases: []idlcore.TaskExecution_Phase{idlcore.TaskExecution_RUNNING, idlcore.TaskExecution_RUNNING}, incrementParallelismCount: 1, @@ -533,6 +560,7 @@ func TestHandleArrayNodePhaseExecuting(t *testing.T) { handler.DoTransition(handler.TransitionTypeEphemeral, handler.PhaseInfoRunning(&handler.ExecutionInfo{})), }, expectedArrayNodePhase: v1alpha1.ArrayNodePhaseExecuting, + expectedTaskPhaseVersion: 1, expectedTransitionPhase: handler.EPhaseRunning, expectedExternalResourcePhases: []idlcore.TaskExecution_Phase{idlcore.TaskExecution_RUNNING}, incrementParallelismCount: 1, @@ -553,6 +581,7 @@ func TestHandleArrayNodePhaseExecuting(t *testing.T) { handler.DoTransition(handler.TransitionTypeEphemeral, handler.PhaseInfoRunning(&handler.ExecutionInfo{})), }, expectedArrayNodePhase: v1alpha1.ArrayNodePhaseExecuting, + expectedTaskPhaseVersion: 1, expectedTransitionPhase: handler.EPhaseRunning, expectedExternalResourcePhases: []idlcore.TaskExecution_Phase{idlcore.TaskExecution_RUNNING, idlcore.TaskExecution_RUNNING}, currentWfParallelism: 0, @@ -573,6 +602,7 @@ func TestHandleArrayNodePhaseExecuting(t *testing.T) { handler.DoTransition(handler.TransitionTypeEphemeral, handler.PhaseInfoRunning(&handler.ExecutionInfo{})), }, expectedArrayNodePhase: v1alpha1.ArrayNodePhaseExecuting, + expectedTaskPhaseVersion: 1, expectedTransitionPhase: handler.EPhaseRunning, 
expectedExternalResourcePhases: []idlcore.TaskExecution_Phase{idlcore.TaskExecution_RUNNING}, currentWfParallelism: workflowMaxParallelism - 1, @@ -591,6 +621,7 @@ func TestHandleArrayNodePhaseExecuting(t *testing.T) { }, subNodeTransitions: []handler.Transition{}, expectedArrayNodePhase: v1alpha1.ArrayNodePhaseExecuting, + expectedTaskPhaseVersion: 0, expectedTransitionPhase: handler.EPhaseRunning, expectedExternalResourcePhases: []idlcore.TaskExecution_Phase{}, currentWfParallelism: workflowMaxParallelism, @@ -612,6 +643,7 @@ func TestHandleArrayNodePhaseExecuting(t *testing.T) { handler.DoTransition(handler.TransitionTypeEphemeral, handler.PhaseInfoRunning(&handler.ExecutionInfo{})), }, expectedArrayNodePhase: v1alpha1.ArrayNodePhaseExecuting, + expectedTaskPhaseVersion: 1, expectedTransitionPhase: handler.EPhaseRunning, expectedExternalResourcePhases: []idlcore.TaskExecution_Phase{idlcore.TaskExecution_RUNNING, idlcore.TaskExecution_RUNNING}, incrementParallelismCount: 1, @@ -632,6 +664,7 @@ func TestHandleArrayNodePhaseExecuting(t *testing.T) { handler.DoTransition(handler.TransitionTypeEphemeral, handler.PhaseInfoSuccess(&handler.ExecutionInfo{})), }, expectedArrayNodePhase: v1alpha1.ArrayNodePhaseSucceeding, + expectedTaskPhaseVersion: 0, expectedTransitionPhase: handler.EPhaseRunning, expectedExternalResourcePhases: []idlcore.TaskExecution_Phase{idlcore.TaskExecution_SUCCEEDED, idlcore.TaskExecution_SUCCEEDED}, }, @@ -652,6 +685,7 @@ func TestHandleArrayNodePhaseExecuting(t *testing.T) { handler.DoTransition(handler.TransitionTypeEphemeral, handler.PhaseInfoFailure(0, "", "", &handler.ExecutionInfo{})), }, expectedArrayNodePhase: v1alpha1.ArrayNodePhaseSucceeding, + expectedTaskPhaseVersion: 0, expectedTransitionPhase: handler.EPhaseRunning, expectedExternalResourcePhases: []idlcore.TaskExecution_Phase{idlcore.TaskExecution_SUCCEEDED, idlcore.TaskExecution_FAILED}, }, @@ -671,9 +705,78 @@ func TestHandleArrayNodePhaseExecuting(t *testing.T) { handler.DoTransition(handler.TransitionTypeEphemeral, handler.PhaseInfoSuccess(&handler.ExecutionInfo{})), }, expectedArrayNodePhase: v1alpha1.ArrayNodePhaseFailing, + expectedTaskPhaseVersion: 0, expectedTransitionPhase: handler.EPhaseRunning, expectedExternalResourcePhases: []idlcore.TaskExecution_Phase{idlcore.TaskExecution_FAILED, idlcore.TaskExecution_SUCCEEDED}, }, + { + name: "EventingAlreadyExists_EventuallySucceeds", + parallelism: uint32Ptr(0), + subNodePhases: []v1alpha1.NodePhase{ + v1alpha1.NodePhaseQueued, + v1alpha1.NodePhaseQueued, + }, + subNodeTaskPhases: []core.Phase{ + core.PhaseRunning, + core.PhaseRunning, + }, + subNodeTransitions: []handler.Transition{ + handler.DoTransition(handler.TransitionTypeEphemeral, handler.PhaseInfoRunning(&handler.ExecutionInfo{})), + handler.DoTransition(handler.TransitionTypeEphemeral, handler.PhaseInfoRunning(&handler.ExecutionInfo{})), + }, + expectedArrayNodePhase: v1alpha1.ArrayNodePhaseExecuting, + expectedTaskPhaseVersion: 2, + expectedTransitionPhase: handler.EPhaseRunning, + expectedExternalResourcePhases: []idlcore.TaskExecution_Phase{idlcore.TaskExecution_RUNNING, idlcore.TaskExecution_RUNNING}, + useFakeEventRecorder: true, + eventRecorderFailures: 2, + eventRecorderError: &eventsErr.EventError{Code: eventsErr.AlreadyExists, Cause: fmt.Errorf("err")}, + incrementParallelismCount: 1, + expectedEventingCalls: 2, + }, + { + name: "EventingAlreadyExists_EventuallyFails", + parallelism: uint32Ptr(0), + subNodePhases: []v1alpha1.NodePhase{ + v1alpha1.NodePhaseQueued, + 
v1alpha1.NodePhaseQueued, + }, + subNodeTaskPhases: []core.Phase{ + core.PhaseRunning, + core.PhaseRunning, + }, + subNodeTransitions: []handler.Transition{ + handler.DoTransition(handler.TransitionTypeEphemeral, handler.PhaseInfoRunning(&handler.ExecutionInfo{})), + handler.DoTransition(handler.TransitionTypeEphemeral, handler.PhaseInfoRunning(&handler.ExecutionInfo{})), + }, + expectedExternalResourcePhases: []idlcore.TaskExecution_Phase{idlcore.TaskExecution_RUNNING, idlcore.TaskExecution_RUNNING}, + useFakeEventRecorder: true, + eventRecorderFailures: 5, + eventRecorderError: &eventsErr.EventError{Code: eventsErr.AlreadyExists, Cause: fmt.Errorf("err")}, + expectHandleError: true, + expectedEventingCalls: 4, + }, + { + name: "EventingFails", + parallelism: uint32Ptr(0), + subNodePhases: []v1alpha1.NodePhase{ + v1alpha1.NodePhaseQueued, + v1alpha1.NodePhaseQueued, + }, + subNodeTaskPhases: []core.Phase{ + core.PhaseRunning, + core.PhaseRunning, + }, + subNodeTransitions: []handler.Transition{ + handler.DoTransition(handler.TransitionTypeEphemeral, handler.PhaseInfoRunning(&handler.ExecutionInfo{})), + handler.DoTransition(handler.TransitionTypeEphemeral, handler.PhaseInfoRunning(&handler.ExecutionInfo{})), + }, + expectedExternalResourcePhases: []idlcore.TaskExecution_Phase{idlcore.TaskExecution_RUNNING, idlcore.TaskExecution_RUNNING}, + useFakeEventRecorder: true, + eventRecorderError: fmt.Errorf("err"), + expectHandleError: true, + expectedEventingCalls: 1, + }, } for _, test := range tests { @@ -684,6 +787,15 @@ func TestHandleArrayNodePhaseExecuting(t *testing.T) { }, scope) assert.NoError(t, err) + var eventRecorder interfaces.EventRecorder + if test.useFakeEventRecorder { + eventRecorder = &fakeEventRecorder{ + phaseVersionFailures: test.eventRecorderFailures, + taskErr: test.eventRecorderError, + } + } else { + eventRecorder = newBufferedEventRecorder() + } // initialize ArrayNodeState arrayNodeState := &handler.ArrayNodeState{ Phase: v1alpha1.ArrayNodePhaseExecuting, @@ -705,18 +817,12 @@ func TestHandleArrayNodePhaseExecuting(t *testing.T) { for i, nodePhase := range test.subNodePhases { arrayNodeState.SubNodePhases.SetItem(i, bitarray.Item(nodePhase)) } - for i, taskPhase := range test.subNodeTaskPhases { - arrayNodeState.SubNodeTaskPhases.SetItem(i, bitarray.Item(taskPhase)) - } - - // create NodeExecutionContext - eventRecorder := newBufferedEventRecorder() nodeSpec := arrayNodeSpec nodeSpec.ArrayNode.Parallelism = test.parallelism nodeSpec.ArrayNode.MinSuccessRatio = test.minSuccessRatio - nCtx := createNodeExecutionContext(dataStore, eventRecorder, nil, literalMap, &arrayNodeSpec, arrayNodeState, test.currentWfParallelism, workflowMaxParallelism) + nCtx := createNodeExecutionContext(dataStore, eventRecorder, nil, literalMap, &nodeSpec, arrayNodeState, test.currentWfParallelism, workflowMaxParallelism) // initialize ArrayNodeHandler nodeHandler := &mocks.NodeHandler{} @@ -745,22 +851,41 @@ func TestHandleArrayNodePhaseExecuting(t *testing.T) { // evaluate node transition, err := arrayNodeHandler.Handle(ctx, nCtx) - assert.NoError(t, err) + + fakeEventRecorder, ok := eventRecorder.(*fakeEventRecorder) + if ok { + assert.Equal(t, test.expectedEventingCalls, fakeEventRecorder.recordTaskEventCallCount) + } + + if !test.expectHandleError { + assert.NoError(t, err) + } else { + assert.Error(t, err) + return + } // validate results assert.Equal(t, test.expectedArrayNodePhase, arrayNodeState.Phase) assert.Equal(t, test.expectedTransitionPhase, transition.Info().GetPhase()) + 
assert.Equal(t, test.expectedTaskPhaseVersion, arrayNodeState.TaskPhaseVersion) - if len(test.expectedExternalResourcePhases) > 0 { - assert.Equal(t, 1, len(eventRecorder.taskExecutionEvents)) + for i, expectedPhase := range test.expectedArrayNodeSubPhases { + assert.Equal(t, expectedPhase, v1alpha1.NodePhase(arrayNodeState.SubNodePhases.GetItem(i))) + } - externalResources := eventRecorder.taskExecutionEvents[0].Metadata.GetExternalResources() - assert.Equal(t, len(test.expectedExternalResourcePhases), len(externalResources)) - for i, expectedPhase := range test.expectedExternalResourcePhases { - assert.Equal(t, expectedPhase, externalResources[i].Phase) + bufferedEventRecorder, ok := eventRecorder.(*bufferedEventRecorder) + if ok { + if len(test.expectedExternalResourcePhases) > 0 { + assert.Equal(t, 1, len(bufferedEventRecorder.taskExecutionEvents)) + + externalResources := bufferedEventRecorder.taskExecutionEvents[0].Metadata.GetExternalResources() + assert.Equal(t, len(test.expectedExternalResourcePhases), len(externalResources)) + for i, expectedPhase := range test.expectedExternalResourcePhases { + assert.Equal(t, expectedPhase, externalResources[i].Phase) + } + } else { + assert.Equal(t, 0, len(bufferedEventRecorder.taskExecutionEvents)) } - } else { - assert.Equal(t, 0, len(eventRecorder.taskExecutionEvents)) } nCtx.ExecutionContext().(*execmocks.ExecutionContext).AssertNumberOfCalls(t, "IncrementParallelism", int(test.incrementParallelismCount)) diff --git a/flytepropeller/pkg/controller/nodes/node_exec_context.go b/flytepropeller/pkg/controller/nodes/node_exec_context.go index a579b241f3..7de31100c6 100644 --- a/flytepropeller/pkg/controller/nodes/node_exec_context.go +++ b/flytepropeller/pkg/controller/nodes/node_exec_context.go @@ -36,6 +36,9 @@ type eventRecorder struct { func (e eventRecorder) RecordTaskEvent(ctx context.Context, ev *event.TaskExecutionEvent, eventConfig *config.EventConfig) error { if err := e.taskEventRecorder.RecordTaskEvent(ctx, ev, eventConfig); err != nil { if eventsErr.IsAlreadyExists(err) { + if eventConfig.ErrorOnAlreadyExists { + return err + } logger.Warningf(ctx, "Failed to record taskEvent, error [%s]. Trying to record state: %s. Ignoring this error!", err.Error(), ev.Phase) return nil } else if eventsErr.IsEventAlreadyInTerminalStateError(err) { From 38cc76dd78e7f46283f160189cb6b38cf8fb1c8d Mon Sep 17 00:00:00 2001 From: ddl-rliu <140021987+ddl-rliu@users.noreply.github.com> Date: Thu, 22 Aug 2024 23:53:33 -0700 Subject: [PATCH 3/8] Add custominfo to agents (#5604) Signed-off-by: ddl-rliu --- .../go/tasks/plugins/webapi/agent/plugin.go | 32 +++++++++++-------- .../tasks/plugins/webapi/agent/plugin_test.go | 16 +++++++--- 2 files changed, 30 insertions(+), 18 deletions(-) diff --git a/flyteplugins/go/tasks/plugins/webapi/agent/plugin.go b/flyteplugins/go/tasks/plugins/webapi/agent/plugin.go index 20a65ccba1..a7b2a3d1d4 100644 --- a/flyteplugins/go/tasks/plugins/webapi/agent/plugin.go +++ b/flyteplugins/go/tasks/plugins/webapi/agent/plugin.go @@ -8,6 +8,7 @@ import ( "time" "golang.org/x/exp/maps" + "google.golang.org/protobuf/types/known/structpb" "k8s.io/apimachinery/pkg/util/wait" "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" @@ -39,10 +40,11 @@ type Plugin struct { type ResourceWrapper struct { Phase flyteIdl.TaskExecution_Phase // Deprecated: Please Use Phase instead. 
- State admin.State - Outputs *flyteIdl.LiteralMap - Message string - LogLinks []*flyteIdl.TaskLog + State admin.State + Outputs *flyteIdl.LiteralMap + Message string + LogLinks []*flyteIdl.TaskLog + CustomInfo *structpb.Struct } // IsTerminal is used to avoid making network calls to the agent service if the resource is already in a terminal state. @@ -192,10 +194,11 @@ func (p *Plugin) ExecuteTaskSync( } return nil, ResourceWrapper{ - Phase: resource.Phase, - Outputs: resource.Outputs, - Message: resource.Message, - LogLinks: resource.LogLinks, + Phase: resource.Phase, + Outputs: resource.Outputs, + Message: resource.Message, + LogLinks: resource.LogLinks, + CustomInfo: resource.CustomInfo, }, err } @@ -221,11 +224,12 @@ func (p *Plugin) Get(ctx context.Context, taskCtx webapi.GetContext) (latest web } return ResourceWrapper{ - Phase: res.Resource.Phase, - State: res.Resource.State, - Outputs: res.Resource.Outputs, - Message: res.Resource.Message, - LogLinks: res.Resource.LogLinks, + Phase: res.Resource.Phase, + State: res.Resource.State, + Outputs: res.Resource.Outputs, + Message: res.Resource.Message, + LogLinks: res.Resource.LogLinks, + CustomInfo: res.Resource.CustomInfo, }, nil } @@ -254,7 +258,7 @@ func (p *Plugin) Delete(ctx context.Context, taskCtx webapi.DeleteContext) error func (p *Plugin) Status(ctx context.Context, taskCtx webapi.StatusContext) (phase core.PhaseInfo, err error) { resource := taskCtx.Resource().(ResourceWrapper) - taskInfo := &core.TaskInfo{Logs: resource.LogLinks} + taskInfo := &core.TaskInfo{Logs: resource.LogLinks, CustomInfo: resource.CustomInfo} switch resource.Phase { case flyteIdl.TaskExecution_QUEUED: diff --git a/flyteplugins/go/tasks/plugins/webapi/agent/plugin_test.go b/flyteplugins/go/tasks/plugins/webapi/agent/plugin_test.go index 3db1c464b6..9e8c97903e 100644 --- a/flyteplugins/go/tasks/plugins/webapi/agent/plugin_test.go +++ b/flyteplugins/go/tasks/plugins/webapi/agent/plugin_test.go @@ -8,6 +8,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "golang.org/x/exp/maps" + "google.golang.org/protobuf/types/known/structpb" agentMocks "github.com/flyteorg/flyte/flyteidl/clients/go/admin/mocks" "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" @@ -114,17 +115,24 @@ func TestPlugin(t *testing.T) { }) t.Run("test RUNNING Status", func(t *testing.T) { + simpleStruct := structpb.Struct{ + Fields: map[string]*structpb.Value{ + "foo": {Kind: &structpb.Value_StringValue{StringValue: "foo"}}, + }, + } taskContext := new(webapiPlugin.StatusContext) taskContext.On("Resource").Return(ResourceWrapper{ - State: admin.State_RUNNING, - Outputs: nil, - Message: "Job is running", - LogLinks: []*flyteIdlCore.TaskLog{{Uri: "http://localhost:3000/log", Name: "Log Link"}}, + State: admin.State_RUNNING, + Outputs: nil, + Message: "Job is running", + LogLinks: []*flyteIdlCore.TaskLog{{Uri: "http://localhost:3000/log", Name: "Log Link"}}, + CustomInfo: &simpleStruct, }) phase, err := plugin.Status(context.Background(), taskContext) assert.NoError(t, err) assert.Equal(t, pluginsCore.PhaseRunning, phase.Phase()) + assert.Equal(t, &simpleStruct, phase.Info().CustomInfo) }) t.Run("test PERMANENT_FAILURE Status", func(t *testing.T) { From 780f98540e910d0df1e9b76f3e065bc25a068e48 Mon Sep 17 00:00:00 2001 From: Paul Dittamo <37558497+pvditt@users.noreply.github.com> Date: Fri, 23 Aug 2024 12:07:05 -0700 Subject: [PATCH 4/8] [BUG] use deep copy of bit arrays when getting array node state (#5681) * [BUG] add retries to handle array node 
eventing race condition (#421)

If there is an error updating a [FlyteWorkflow CRD](https://github.com/unionai/flyte/blob/6a7207c5345604a28a9d4e3699becff767f520f5/flytepropeller/pkg/controller/handler.go#L378), then the propeller streak ends without the CRD getting updated, and the in-memory copy of the FlyteWorkflow is not utilized on the next loop. [TaskPhaseVersion](https://github.com/flyteorg/flyte/blob/37b4e13ac4a3594ac63b7a35058f4b2220e51282/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/node_status.go#L239) is stored in the FlyteWorkflow and is incremented whenever node/subnode state is updated, to ensure that events are unique. If events stay in the same state and have the same TaskPhaseVersion, they either [get short-circuited and don't get emitted to admin](https://github.com/flyteorg/flyte/blob/37b4e13ac4a3594ac63b7a35058f4b2220e51282/flytepropeller/events/admin_eventsink.go#L59) or get returned as an [AlreadyExists error](https://github.com/flyteorg/flyte/blob/37b4e13ac4a3594ac63b7a35058f4b2220e51282/flyteadmin/pkg/manager/impl/task_execution_manager.go#L172), which is [handled in propeller so it does not bubble up as an error](https://github.com/flyteorg/flyte/blob/37b4e13ac4a3594ac63b7a35058f4b2220e51282/flytepropeller/pkg/controller/nodes/node_exec_context.go#L38).

We can run into issues with ArrayNode eventing when:
- the array node handler increments the task phase version from "0" to "1"
- the admin event sink emits the event with version "1"
- the propeller controller is not able to update the FlyteWorkflow CRD, so the ArrayNodeStatus indicates the taskPhaseVersion is still 0
- on the next loop, the array node handler increments the task phase version from "0" to "1" again
- the admin event sink prevents the event from getting emitted, as an event with the same ID has already been received. No error is bubbled up.

This means we lose subnode state until there is an event that contains an update to that subnode. If the lost state is the subnode reaching a terminal state, then the subnode state (from admin/UI) is "stuck" in a non-terminal state.

I confirmed this to be an issue in the load-test-cluster. Whenever there was an [error syncing the FlyteWorkflow](https://github.com/flyteorg/flyte/blob/37b4e13ac4a3594ac63b7a35058f4b2220e51282/flytepropeller/pkg/controller/workers.go#L91), the next round of eventing in ArrayNode would fail unless the ArrayNode phase changed.

- added unit test
- tested locally in sandbox
- test in dogfood - https://buildkite.com/unionai/managed-cluster-staging-sync/builds/4398#01914a1a-f6d6-42a5-b41b-7b6807f27370
- should be fine to roll out to prod

Should this change be upstreamed to OSS (flyteorg/flyte)? If not, please uncheck this box, which is used for auditing. Note, it is the responsibility of each developer to actually upstream their changes. See [this guide](https://unionai.atlassian.net/wiki/spaces/ENG/pages/447610883/Flyte+-+Union+Cloud+Development+Runbook/#When-are-versions-updated%3F).

- [x] To be upstreamed to OSS

fixes: https://linear.app/unionai/issue/COR-1534/bug-arraynode-shows-non-complete-jobs-in-ui-when-the-job-is-actually

* [x] Added tests
* [x] Ran a deploy dry run and shared the terraform plan
* [ ] Added logging and metrics
* [ ] Updated [dashboards](https://unionai.grafana.net/dashboards) and [alerts](https://unionai.grafana.net/alerting/list)
* [ ] Updated documentation

Signed-off-by: Paul Dittamo

* handle already exists error on array node abort (#427)

* handle already exists error on array node abort

Signed-off-by: Paul Dittamo

* update comment

Signed-off-by: Paul Dittamo

---------

Signed-off-by: Paul Dittamo

* [BUG] set cause for already exists EventError (#432)

* set cause for already exists EventError

Signed-off-by: Paul Dittamo

* add nil check event error

Signed-off-by: Paul Dittamo

* lint

Signed-off-by: Paul Dittamo

---------

Signed-off-by: Paul Dittamo

* add deep copy for array node status

Signed-off-by: Paul Dittamo

* add deep copy for array node status

Signed-off-by: Paul Dittamo

* use deep copy of bit arrays when getting array node state

Signed-off-by: Paul Dittamo

* Revert "add deep copy for array node status"

This reverts commit dde75951d87cb497e358a5bd0ff27f05078f5b72.

Signed-off-by: Paul Dittamo

* ignore ErrorOnAlreadyExists when marshalling event config

Signed-off-by: Paul Dittamo

---------

Signed-off-by: Paul Dittamo
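The headline fix in this patch is deep-copying the bit-array state when reading ArrayNode state out of the CRD (see `node_state_manager.go` below), so the handler's in-memory mutations cannot alias the stored status. A toy illustration of the aliasing hazard, using a simplified bit-array type invented for this sketch:

```go
package main

import "fmt"

// toyBitArray is a stand-in for the flytestdlib bitarray types.
type toyBitArray struct{ words []uint64 }

func (b *toyBitArray) Set(i uint32)        { b.words[i/64] |= 1 << (i % 64) }
func (b *toyBitArray) IsSet(i uint32) bool { return b.words[i/64]&(1<<(i%64)) != 0 }

// DeepCopy returns an independent copy, analogous to the DeepCopy calls the
// patch adds before handing state to the handler.
func (b *toyBitArray) DeepCopy() *toyBitArray {
	out := &toyBitArray{words: make([]uint64, len(b.words))}
	copy(out.words, b.words)
	return out
}

func main() {
	stored := &toyBitArray{words: make([]uint64, 1)} // the "CRD" state

	shallow := *stored           // copies the struct, but words still aliases stored
	shallow.Set(3)               // ...so this also mutates the stored state
	fmt.Println(stored.IsSet(3)) // true: the aliasing bug

	stored = &toyBitArray{words: make([]uint64, 1)}
	deep := stored.DeepCopy() // independent backing array
	deep.Set(3)
	fmt.Println(stored.IsSet(3)) // false: stored state is untouched
}
```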
---
 .../flyteworkflow/v1alpha1/node_status.go | 21 ++++++ .../v1alpha1/zz_generated.deepcopy.go | 4 ++ .../pkg/controller/config/config.go | 3 +- .../pkg/controller/config/config_flags.go | 2 +- .../controller/config/config_flags_test.go | 6 +- .../controller/nodes/array/handler_test.go | 70 ++++++++++++++++--- .../controller/nodes/node_state_manager.go | 24 +++++-- 7 files changed, 110 insertions(+), 20 deletions(-) diff --git a/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/node_status.go b/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/node_status.go index aab034224d..218b045588 100644 --- a/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/node_status.go +++ b/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/node_status.go @@ -316,6 +316,27 @@ func (in *ArrayNodeStatus) SetTaskPhaseVersion(taskPhaseVersion uint32) { } } +func (in *ArrayNodeStatus) DeepCopyInto(out *ArrayNodeStatus) { + *out = *in + out.MutableStruct = in.MutableStruct + + if in.ExecutionError != nil { + in, out := &in.ExecutionError, &out.ExecutionError + *out = new(core.ExecutionError) + *out = *in + } +} + +func (in *ArrayNodeStatus) DeepCopy() *ArrayNodeStatus { + if in == nil { + return nil + } + + out := &ArrayNodeStatus{} + in.DeepCopyInto(out) + return out +} + type NodeStatus struct { MutableStruct Phase NodePhase `json:"phase,omitempty"` diff --git a/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/zz_generated.deepcopy.go b/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/zz_generated.deepcopy.go index febbca733c..95cac582b8 100644 --- a/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/zz_generated.deepcopy.go +++ b/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/zz_generated.deepcopy.go @@ -548,6 +548,10 @@ func (in *NodeStatus) DeepCopyInto(out *NodeStatus) { *out = new(DynamicNodeStatus) (*in).DeepCopyInto(*out) } + if in.ArrayNodeStatus != nil { + in, out := &in.ArrayNodeStatus, &out.ArrayNodeStatus + *out = (*in).DeepCopy() + } if in.Error != nil { in, out := &in.Error, &out.Error *out = (*in).DeepCopy() diff --git a/flytepropeller/pkg/controller/config/config.go b/flytepropeller/pkg/controller/config/config.go index f058212322..a0217e186a 100644 ---
a/flytepropeller/pkg/controller/config/config.go +++ b/flytepropeller/pkg/controller/config/config.go @@ -259,7 +259,8 @@ const ( type EventConfig struct { RawOutputPolicy RawOutputPolicy `json:"raw-output-policy" pflag:",How output data should be passed along in execution events."` FallbackToOutputReference bool `json:"fallback-to-output-reference" pflag:",Whether output data should be sent by reference when it is too large to be sent inline in execution events."` - ErrorOnAlreadyExists bool `json:"error-on-already-exists" pflag:",Whether to return an error when an event already exists."` + // only meant to be overridden for certain node types that have different eventing behavior such as ArrayNode + ErrorOnAlreadyExists bool `json:"-"` } // ParallelismBehavior defines how ArrayNode should handle subNode parallelism by default diff --git a/flytepropeller/pkg/controller/config/config_flags.go b/flytepropeller/pkg/controller/config/config_flags.go index d2dc0971ff..858fc8a8ba 100755 --- a/flytepropeller/pkg/controller/config/config_flags.go +++ b/flytepropeller/pkg/controller/config/config_flags.go @@ -100,7 +100,7 @@ func (cfg Config) GetPFlagSet(prefix string) *pflag.FlagSet { cmdFlags.Int(fmt.Sprintf("%v%v", prefix, "max-streak-length"), defaultConfig.MaxStreakLength, "Maximum number of consecutive rounds that one propeller worker can use for one workflow - >1 => turbo-mode is enabled.") cmdFlags.String(fmt.Sprintf("%v%v", prefix, "event-config.raw-output-policy"), defaultConfig.EventConfig.RawOutputPolicy, "How output data should be passed along in execution events.") cmdFlags.Bool(fmt.Sprintf("%v%v", prefix, "event-config.fallback-to-output-reference"), defaultConfig.EventConfig.FallbackToOutputReference, "Whether output data should be sent by reference when it is too large to be sent inline in execution events.") - cmdFlags.Bool(fmt.Sprintf("%v%v", prefix, "event-config.error-on-already-exists"), defaultConfig.EventConfig.ErrorOnAlreadyExists, "Whether to return an error when an event already exists.") + cmdFlags.Bool(fmt.Sprintf("%v%v", prefix, "event-config.-"), defaultConfig.EventConfig.ErrorOnAlreadyExists, "") cmdFlags.StringSlice(fmt.Sprintf("%v%v", prefix, "include-shard-key-label"), defaultConfig.IncludeShardKeyLabel, "Include the specified shard key label in the k8s FlyteWorkflow CRD label selector") cmdFlags.StringSlice(fmt.Sprintf("%v%v", prefix, "exclude-shard-key-label"), defaultConfig.ExcludeShardKeyLabel, "Exclude the specified shard key label from the k8s FlyteWorkflow CRD label selector") cmdFlags.StringSlice(fmt.Sprintf("%v%v", prefix, "include-project-label"), defaultConfig.IncludeProjectLabel, "Include the specified project label in the k8s FlyteWorkflow CRD label selector") diff --git a/flytepropeller/pkg/controller/config/config_flags_test.go b/flytepropeller/pkg/controller/config/config_flags_test.go index 66a14381af..27e7b76efa 100755 --- a/flytepropeller/pkg/controller/config/config_flags_test.go +++ b/flytepropeller/pkg/controller/config/config_flags_test.go @@ -799,13 +799,13 @@ func TestConfig_SetFlags(t *testing.T) { } }) }) - t.Run("Test_event-config.error-on-already-exists", func(t *testing.T) { + t.Run("Test_event-config.-", func(t *testing.T) { t.Run("Override", func(t *testing.T) { testValue := "1" - cmdFlags.Set("event-config.error-on-already-exists", testValue) - if vBool, err := cmdFlags.GetBool("event-config.error-on-already-exists"); err == nil { + cmdFlags.Set("event-config.-", testValue) + if vBool, err := cmdFlags.GetBool("event-config.-"); 
err == nil { testDecodeJson_Config(t, fmt.Sprintf("%v", vBool), &actual.EventConfig.ErrorOnAlreadyExists) } else { diff --git a/flytepropeller/pkg/controller/nodes/array/handler_test.go b/flytepropeller/pkg/controller/nodes/array/handler_test.go index d27b412c1f..648d70e36c 100644 --- a/flytepropeller/pkg/controller/nodes/array/handler_test.go +++ b/flytepropeller/pkg/controller/nodes/array/handler_test.go @@ -539,7 +539,11 @@ func TestHandleArrayNodePhaseExecuting(t *testing.T) { handler.DoTransition(handler.TransitionTypeEphemeral, handler.PhaseInfoRunning(&handler.ExecutionInfo{})), handler.DoTransition(handler.TransitionTypeEphemeral, handler.PhaseInfoRunning(&handler.ExecutionInfo{})), }, - expectedArrayNodePhase: v1alpha1.ArrayNodePhaseExecuting, + expectedArrayNodePhase: v1alpha1.ArrayNodePhaseExecuting, + expectedArrayNodeSubPhases: []v1alpha1.NodePhase{ + v1alpha1.NodePhaseRunning, + v1alpha1.NodePhaseRunning, + }, expectedTaskPhaseVersion: 1, expectedTransitionPhase: handler.EPhaseRunning, expectedExternalResourcePhases: []idlcore.TaskExecution_Phase{idlcore.TaskExecution_RUNNING, idlcore.TaskExecution_RUNNING}, @@ -559,7 +563,11 @@ func TestHandleArrayNodePhaseExecuting(t *testing.T) { subNodeTransitions: []handler.Transition{ handler.DoTransition(handler.TransitionTypeEphemeral, handler.PhaseInfoRunning(&handler.ExecutionInfo{})), }, - expectedArrayNodePhase: v1alpha1.ArrayNodePhaseExecuting, + expectedArrayNodePhase: v1alpha1.ArrayNodePhaseExecuting, + expectedArrayNodeSubPhases: []v1alpha1.NodePhase{ + v1alpha1.NodePhaseRunning, + v1alpha1.NodePhaseQueued, + }, expectedTaskPhaseVersion: 1, expectedTransitionPhase: handler.EPhaseRunning, expectedExternalResourcePhases: []idlcore.TaskExecution_Phase{idlcore.TaskExecution_RUNNING}, @@ -580,7 +588,11 @@ func TestHandleArrayNodePhaseExecuting(t *testing.T) { handler.DoTransition(handler.TransitionTypeEphemeral, handler.PhaseInfoRunning(&handler.ExecutionInfo{})), handler.DoTransition(handler.TransitionTypeEphemeral, handler.PhaseInfoRunning(&handler.ExecutionInfo{})), }, - expectedArrayNodePhase: v1alpha1.ArrayNodePhaseExecuting, + expectedArrayNodePhase: v1alpha1.ArrayNodePhaseExecuting, + expectedArrayNodeSubPhases: []v1alpha1.NodePhase{ + v1alpha1.NodePhaseRunning, + v1alpha1.NodePhaseRunning, + }, expectedTaskPhaseVersion: 1, expectedTransitionPhase: handler.EPhaseRunning, expectedExternalResourcePhases: []idlcore.TaskExecution_Phase{idlcore.TaskExecution_RUNNING, idlcore.TaskExecution_RUNNING}, @@ -601,7 +613,11 @@ func TestHandleArrayNodePhaseExecuting(t *testing.T) { subNodeTransitions: []handler.Transition{ handler.DoTransition(handler.TransitionTypeEphemeral, handler.PhaseInfoRunning(&handler.ExecutionInfo{})), }, - expectedArrayNodePhase: v1alpha1.ArrayNodePhaseExecuting, + expectedArrayNodePhase: v1alpha1.ArrayNodePhaseExecuting, + expectedArrayNodeSubPhases: []v1alpha1.NodePhase{ + v1alpha1.NodePhaseRunning, + v1alpha1.NodePhaseQueued, + }, expectedTaskPhaseVersion: 1, expectedTransitionPhase: handler.EPhaseRunning, expectedExternalResourcePhases: []idlcore.TaskExecution_Phase{idlcore.TaskExecution_RUNNING}, @@ -619,8 +635,12 @@ func TestHandleArrayNodePhaseExecuting(t *testing.T) { core.PhaseUndefined, core.PhaseUndefined, }, - subNodeTransitions: []handler.Transition{}, - expectedArrayNodePhase: v1alpha1.ArrayNodePhaseExecuting, + subNodeTransitions: []handler.Transition{}, + expectedArrayNodePhase: v1alpha1.ArrayNodePhaseExecuting, + expectedArrayNodeSubPhases: []v1alpha1.NodePhase{ + v1alpha1.NodePhaseQueued, + 
v1alpha1.NodePhaseQueued, + }, expectedTaskPhaseVersion: 0, expectedTransitionPhase: handler.EPhaseRunning, expectedExternalResourcePhases: []idlcore.TaskExecution_Phase{}, @@ -642,7 +662,11 @@ func TestHandleArrayNodePhaseExecuting(t *testing.T) { handler.DoTransition(handler.TransitionTypeEphemeral, handler.PhaseInfoRunning(&handler.ExecutionInfo{})), handler.DoTransition(handler.TransitionTypeEphemeral, handler.PhaseInfoRunning(&handler.ExecutionInfo{})), }, - expectedArrayNodePhase: v1alpha1.ArrayNodePhaseExecuting, + expectedArrayNodePhase: v1alpha1.ArrayNodePhaseExecuting, + expectedArrayNodeSubPhases: []v1alpha1.NodePhase{ + v1alpha1.NodePhaseRunning, + v1alpha1.NodePhaseRunning, + }, expectedTaskPhaseVersion: 1, expectedTransitionPhase: handler.EPhaseRunning, expectedExternalResourcePhases: []idlcore.TaskExecution_Phase{idlcore.TaskExecution_RUNNING, idlcore.TaskExecution_RUNNING}, @@ -663,7 +687,11 @@ func TestHandleArrayNodePhaseExecuting(t *testing.T) { handler.DoTransition(handler.TransitionTypeEphemeral, handler.PhaseInfoSuccess(&handler.ExecutionInfo{})), handler.DoTransition(handler.TransitionTypeEphemeral, handler.PhaseInfoSuccess(&handler.ExecutionInfo{})), }, - expectedArrayNodePhase: v1alpha1.ArrayNodePhaseSucceeding, + expectedArrayNodePhase: v1alpha1.ArrayNodePhaseSucceeding, + expectedArrayNodeSubPhases: []v1alpha1.NodePhase{ + v1alpha1.NodePhaseSucceeded, + v1alpha1.NodePhaseSucceeded, + }, expectedTaskPhaseVersion: 0, expectedTransitionPhase: handler.EPhaseRunning, expectedExternalResourcePhases: []idlcore.TaskExecution_Phase{idlcore.TaskExecution_SUCCEEDED, idlcore.TaskExecution_SUCCEEDED}, @@ -684,7 +712,11 @@ func TestHandleArrayNodePhaseExecuting(t *testing.T) { handler.DoTransition(handler.TransitionTypeEphemeral, handler.PhaseInfoSuccess(&handler.ExecutionInfo{})), handler.DoTransition(handler.TransitionTypeEphemeral, handler.PhaseInfoFailure(0, "", "", &handler.ExecutionInfo{})), }, - expectedArrayNodePhase: v1alpha1.ArrayNodePhaseSucceeding, + expectedArrayNodePhase: v1alpha1.ArrayNodePhaseSucceeding, + expectedArrayNodeSubPhases: []v1alpha1.NodePhase{ + v1alpha1.NodePhaseSucceeded, + v1alpha1.NodePhaseFailed, + }, expectedTaskPhaseVersion: 0, expectedTransitionPhase: handler.EPhaseRunning, expectedExternalResourcePhases: []idlcore.TaskExecution_Phase{idlcore.TaskExecution_SUCCEEDED, idlcore.TaskExecution_FAILED}, @@ -704,7 +736,11 @@ func TestHandleArrayNodePhaseExecuting(t *testing.T) { handler.DoTransition(handler.TransitionTypeEphemeral, handler.PhaseInfoFailure(0, "", "", &handler.ExecutionInfo{})), handler.DoTransition(handler.TransitionTypeEphemeral, handler.PhaseInfoSuccess(&handler.ExecutionInfo{})), }, - expectedArrayNodePhase: v1alpha1.ArrayNodePhaseFailing, + expectedArrayNodePhase: v1alpha1.ArrayNodePhaseFailing, + expectedArrayNodeSubPhases: []v1alpha1.NodePhase{ + v1alpha1.NodePhaseFailed, + v1alpha1.NodePhaseSucceeded, + }, expectedTaskPhaseVersion: 0, expectedTransitionPhase: handler.EPhaseRunning, expectedExternalResourcePhases: []idlcore.TaskExecution_Phase{idlcore.TaskExecution_FAILED, idlcore.TaskExecution_SUCCEEDED}, @@ -724,7 +760,11 @@ func TestHandleArrayNodePhaseExecuting(t *testing.T) { handler.DoTransition(handler.TransitionTypeEphemeral, handler.PhaseInfoRunning(&handler.ExecutionInfo{})), handler.DoTransition(handler.TransitionTypeEphemeral, handler.PhaseInfoRunning(&handler.ExecutionInfo{})), }, - expectedArrayNodePhase: v1alpha1.ArrayNodePhaseExecuting, + expectedArrayNodePhase: v1alpha1.ArrayNodePhaseExecuting, + 
expectedArrayNodeSubPhases: []v1alpha1.NodePhase{ + v1alpha1.NodePhaseRunning, + v1alpha1.NodePhaseRunning, + }, expectedTaskPhaseVersion: 2, expectedTransitionPhase: handler.EPhaseRunning, expectedExternalResourcePhases: []idlcore.TaskExecution_Phase{idlcore.TaskExecution_RUNNING, idlcore.TaskExecution_RUNNING}, @@ -749,6 +789,10 @@ func TestHandleArrayNodePhaseExecuting(t *testing.T) { handler.DoTransition(handler.TransitionTypeEphemeral, handler.PhaseInfoRunning(&handler.ExecutionInfo{})), handler.DoTransition(handler.TransitionTypeEphemeral, handler.PhaseInfoRunning(&handler.ExecutionInfo{})), }, + expectedArrayNodeSubPhases: []v1alpha1.NodePhase{ + v1alpha1.NodePhaseQueued, + v1alpha1.NodePhaseQueued, + }, expectedExternalResourcePhases: []idlcore.TaskExecution_Phase{idlcore.TaskExecution_RUNNING, idlcore.TaskExecution_RUNNING}, useFakeEventRecorder: true, eventRecorderFailures: 5, @@ -771,6 +815,10 @@ func TestHandleArrayNodePhaseExecuting(t *testing.T) { handler.DoTransition(handler.TransitionTypeEphemeral, handler.PhaseInfoRunning(&handler.ExecutionInfo{})), handler.DoTransition(handler.TransitionTypeEphemeral, handler.PhaseInfoRunning(&handler.ExecutionInfo{})), }, + expectedArrayNodeSubPhases: []v1alpha1.NodePhase{ + v1alpha1.NodePhaseQueued, + v1alpha1.NodePhaseQueued, + }, expectedExternalResourcePhases: []idlcore.TaskExecution_Phase{idlcore.TaskExecution_RUNNING, idlcore.TaskExecution_RUNNING}, useFakeEventRecorder: true, eventRecorderError: fmt.Errorf("err"), diff --git a/flytepropeller/pkg/controller/nodes/node_state_manager.go b/flytepropeller/pkg/controller/nodes/node_state_manager.go index 91cf1f2679..a9ead9afc3 100644 --- a/flytepropeller/pkg/controller/nodes/node_state_manager.go +++ b/flytepropeller/pkg/controller/nodes/node_state_manager.go @@ -160,11 +160,27 @@ func (n nodeStateManager) GetArrayNodeState() handler.ArrayNodeState { if an != nil { as.Phase = an.GetArrayNodePhase() as.Error = an.GetExecutionError() - as.SubNodePhases = an.GetSubNodePhases() - as.SubNodeTaskPhases = an.GetSubNodeTaskPhases() - as.SubNodeRetryAttempts = an.GetSubNodeRetryAttempts() - as.SubNodeSystemFailures = an.GetSubNodeSystemFailures() as.TaskPhaseVersion = an.GetTaskPhaseVersion() + + subNodePhases := an.GetSubNodePhases() + if subNodePhasesCopy := subNodePhases.DeepCopy(); subNodePhasesCopy != nil { + as.SubNodePhases = *subNodePhasesCopy + } + + subNodeTaskPhases := an.GetSubNodeTaskPhases() + if subNodeTaskPhasesCopy := subNodeTaskPhases.DeepCopy(); subNodeTaskPhasesCopy != nil { + as.SubNodeTaskPhases = *subNodeTaskPhasesCopy + } + + subNodeRetryAttempts := an.GetSubNodeRetryAttempts() + if subNodeRetryAttemptsCopy := subNodeRetryAttempts.DeepCopy(); subNodeRetryAttemptsCopy != nil { + as.SubNodeRetryAttempts = *subNodeRetryAttemptsCopy + } + + subNodeSystemFailures := an.GetSubNodeSystemFailures() + if subNodeSystemFailuresCopy := subNodeSystemFailures.DeepCopy(); subNodeSystemFailuresCopy != nil { + as.SubNodeSystemFailures = *subNodeSystemFailuresCopy + } } return as } From 3fb9525ab403fbdd788d5f5f49bb5cd1d312fb30 Mon Sep 17 00:00:00 2001 From: Eduardo Apolinario <653394+eapolinario@users.noreply.github.com> Date: Fri, 23 Aug 2024 16:33:07 -0400 Subject: [PATCH 5/8] More concise definition of launchplan (#5682) * More concise definition of launchplan Signed-off-by: Eduardo Apolinario <653394+eapolinario@users.noreply.github.com> * Update docs/flyte_fundamentals/tasks_workflows_and_launch_plans.md Co-authored-by: Nikki Everett Signed-off-by: Eduardo Apolinario 
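For readers unfamiliar with the aliasing bug the node_state_manager.go hunk above guards against: copying a slice-backed value copies only the slice header, so two owners end up mutating the same backing array. Below is a minimal sketch of the failure mode and the deep-copy fix; `BitSet` here is a hypothetical stand-in for the bit arrays held in ArrayNode state, not the actual Flyte type:

```go
package main

import "fmt"

// BitSet is an illustrative stand-in for a bit-array type with a DeepCopy method.
type BitSet struct{ bits []byte }

func (b *BitSet) DeepCopy() *BitSet {
	if b == nil {
		return nil
	}
	out := &BitSet{bits: make([]byte, len(b.bits))}
	copy(out.bits, b.bits)
	return out
}

func main() {
	shared := BitSet{bits: []byte{0b0001}}

	aliased := shared           // copies the struct, but both share one backing array
	aliased.bits[0] = 0b1111    // silently mutates `shared` too
	fmt.Println(shared.bits[0]) // 15 — the aliasing bug

	shared = BitSet{bits: []byte{0b0001}}
	if c := shared.DeepCopy(); c != nil { // mirrors the nil-guarded copies in the hunk above
		c.bits[0] = 0b1111
	}
	fmt.Println(shared.bits[0]) // 1 — the original is untouched
}
```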
<653394+eapolinario@users.noreply.github.com> * Update docs/flyte_fundamentals/tasks_workflows_and_launch_plans.md Co-authored-by: Nikki Everett Signed-off-by: Eduardo Apolinario <653394+eapolinario@users.noreply.github.com> --------- Signed-off-by: Eduardo Apolinario <653394+eapolinario@users.noreply.github.com> Co-authored-by: Nikki Everett --- .../tasks_workflows_and_launch_plans.md | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/docs/flyte_fundamentals/tasks_workflows_and_launch_plans.md b/docs/flyte_fundamentals/tasks_workflows_and_launch_plans.md index c8ca7f2071..f66988343a 100644 --- a/docs/flyte_fundamentals/tasks_workflows_and_launch_plans.md +++ b/docs/flyte_fundamentals/tasks_workflows_and_launch_plans.md @@ -263,15 +263,11 @@ Learn more about chaining flyte entities in the {ref}`User Guide Date: Fri, 23 Aug 2024 15:24:02 -0700 Subject: [PATCH 6/8] Auth/prevent lookup per call (#5686) * save values Signed-off-by: Yee Hing Tong * move things up Signed-off-by: Yee Hing Tong * tests Signed-off-by: Yee Hing Tong * unit test Signed-off-by: Yee Hing Tong * imports for client test Signed-off-by: Yee Hing Tong * more test Signed-off-by: Yee Hing Tong * don't test admin connection Signed-off-by: Yee Hing Tong * disable client for config Signed-off-by: Yee Hing Tong * make generate Signed-off-by: Yee Hing Tong * hide behind a once Signed-off-by: Yee Hing Tong * typo Signed-off-by: Yee Hing Tong * reset client builder test Signed-off-by: Yee Hing Tong * reset client test Signed-off-by: Yee Hing Tong * revert propeller Signed-off-by: Yee Hing Tong * delay invocation even further Signed-off-by: Yee Hing Tong --------- Signed-off-by: Yee Hing Tong --- flytectl/cmd/configuration/configuration.go | 10 +- flytectl/cmd/core/cmd_test.go | 4 +- flyteidl/clients/go/admin/auth_interceptor.go | 108 +++++++++++++----- .../clients/go/admin/auth_interceptor_test.go | 88 ++++++++++++-- flyteidl/clients/go/admin/client.go | 3 +- flyteidl/gen/pb_rust/datacatalog.rs | 9 +- flyteidl/gen/pb_rust/flyteidl.admin.rs | 49 ++++---- flyteidl/gen/pb_rust/flyteidl.cacheservice.rs | 7 +- flyteidl/gen/pb_rust/flyteidl.core.rs | 41 +++---- flyteidl/gen/pb_rust/flyteidl.event.rs | 1 + .../gen/pb_rust/flyteidl.plugins.kubeflow.rs | 3 +- flyteidl/gen/pb_rust/flyteidl.plugins.rs | 11 +- flyteidl/gen/pb_rust/flyteidl.service.rs | 9 +- 13 files changed, 238 insertions(+), 105 deletions(-) diff --git a/flytectl/cmd/configuration/configuration.go b/flytectl/cmd/configuration/configuration.go index ecbedba721..fa9d87a00a 100644 --- a/flytectl/cmd/configuration/configuration.go +++ b/flytectl/cmd/configuration/configuration.go @@ -63,9 +63,13 @@ func CreateConfigCommand() *cobra.Command { configCmd := viper.GetConfigCommand() getResourcesFuncs := map[string]cmdcore.CommandEntry{ - "init": {CmdFunc: configInitFunc, Aliases: []string{""}, ProjectDomainNotRequired: true, - Short: initCmdShort, - Long: initCmdLong, PFlagProvider: initConfig.DefaultConfig}, + "init": { + CmdFunc: configInitFunc, + Aliases: []string{""}, + ProjectDomainNotRequired: true, + DisableFlyteClient: true, + Short: initCmdShort, + Long: initCmdLong, PFlagProvider: initConfig.DefaultConfig}, } configCmd.Flags().BoolVar(&initConfig.DefaultConfig.Force, "force", false, "Force to overwrite the default config file without confirmation") diff --git a/flytectl/cmd/core/cmd_test.go b/flytectl/cmd/core/cmd_test.go index e3a1843105..3f5b3b19a5 100644 --- a/flytectl/cmd/core/cmd_test.go +++ b/flytectl/cmd/core/cmd_test.go @@ -21,7 +21,7 
@@ func TestGenerateCommandFunc(t *testing.T) { adminCfg.Endpoint = config.URL{URL: url.URL{Host: "dummyHost"}} adminCfg.AuthType = admin.AuthTypePkce rootCmd := &cobra.Command{} - cmdEntry := CommandEntry{CmdFunc: testCommandFunc, ProjectDomainNotRequired: true} + cmdEntry := CommandEntry{CmdFunc: testCommandFunc, ProjectDomainNotRequired: true, DisableFlyteClient: true} fn := generateCommandFunc(cmdEntry) assert.Nil(t, fn(rootCmd, []string{})) }) @@ -30,7 +30,7 @@ func TestGenerateCommandFunc(t *testing.T) { adminCfg := admin.GetConfig(context.Background()) adminCfg.Endpoint = config.URL{URL: url.URL{Host: ""}} rootCmd := &cobra.Command{} - cmdEntry := CommandEntry{CmdFunc: testCommandFunc, ProjectDomainNotRequired: true} + cmdEntry := CommandEntry{CmdFunc: testCommandFunc, ProjectDomainNotRequired: true, DisableFlyteClient: true} fn := generateCommandFunc(cmdEntry) assert.Nil(t, fn(rootCmd, []string{})) }) diff --git a/flyteidl/clients/go/admin/auth_interceptor.go b/flyteidl/clients/go/admin/auth_interceptor.go index 4cebf6440f..5d3d9fd92f 100644 --- a/flyteidl/clients/go/admin/auth_interceptor.go +++ b/flyteidl/clients/go/admin/auth_interceptor.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "net/http" + "sync" "golang.org/x/oauth2" "google.golang.org/grpc" @@ -20,33 +21,10 @@ const ProxyAuthorizationHeader = "proxy-authorization" // MaterializeCredentials will attempt to build a TokenSource given the anonymously available information exposed by the server. // Once established, it'll invoke PerRPCCredentialsFuture.Store() on perRPCCredentials to populate it with the appropriate values. -func MaterializeCredentials(ctx context.Context, cfg *Config, tokenCache cache.TokenCache, - perRPCCredentials *PerRPCCredentialsFuture, proxyCredentialsFuture *PerRPCCredentialsFuture) error { - authMetadataClient, err := InitializeAuthMetadataClient(ctx, cfg, proxyCredentialsFuture) - if err != nil { - return fmt.Errorf("failed to initialized Auth Metadata Client. Error: %w", err) - } - - tokenSourceProvider, err := NewTokenSourceProvider(ctx, cfg, tokenCache, authMetadataClient) - if err != nil { - return fmt.Errorf("failed to initialized token source provider. Err: %w", err) - } - - authorizationMetadataKey := cfg.AuthorizationHeader - if len(authorizationMetadataKey) == 0 { - clientMetadata, err := authMetadataClient.GetPublicClientConfig(ctx, &service.PublicClientAuthConfigRequest{}) - if err != nil { - return fmt.Errorf("failed to fetch client metadata. Error: %v", err) - } - authorizationMetadataKey = clientMetadata.AuthorizationMetadataKey - } - - tokenSource, err := tokenSourceProvider.GetTokenSource(ctx) - if err != nil { - return fmt.Errorf("failed to get token source. Error: %w", err) - } +func MaterializeCredentials(tokenSource oauth2.TokenSource, cfg *Config, authorizationMetadataKey string, + perRPCCredentials *PerRPCCredentialsFuture) error { - _, err = tokenSource.Token() + _, err := tokenSource.Token() if err != nil { return fmt.Errorf("failed to issue token. 
Error: %w", err) } @@ -127,6 +105,60 @@ func setHTTPClientContext(ctx context.Context, cfg *Config, proxyCredentialsFutu return context.WithValue(ctx, oauth2.HTTPClient, httpClient) } +type OauthMetadataProvider struct { + authorizationMetadataKey string + tokenSource oauth2.TokenSource + once sync.Once +} + +func (o *OauthMetadataProvider) getTokenSourceAndMetadata(cfg *Config, tokenCache cache.TokenCache, proxyCredentialsFuture *PerRPCCredentialsFuture) error { + ctx := context.Background() + + authMetadataClient, err := InitializeAuthMetadataClient(ctx, cfg, proxyCredentialsFuture) + if err != nil { + return fmt.Errorf("failed to initialized Auth Metadata Client. Error: %w", err) + } + + tokenSourceProvider, err := NewTokenSourceProvider(ctx, cfg, tokenCache, authMetadataClient) + if err != nil { + return fmt.Errorf("failed to initialize token source provider. Err: %w", err) + } + + authorizationMetadataKey := cfg.AuthorizationHeader + if len(authorizationMetadataKey) == 0 { + clientMetadata, err := authMetadataClient.GetPublicClientConfig(ctx, &service.PublicClientAuthConfigRequest{}) + if err != nil { + return fmt.Errorf("failed to fetch client metadata. Error: %v", err) + } + authorizationMetadataKey = clientMetadata.AuthorizationMetadataKey + } + + tokenSource, err := tokenSourceProvider.GetTokenSource(ctx) + if err != nil { + return fmt.Errorf("failed to get token source. Error: %w", err) + } + + o.authorizationMetadataKey = authorizationMetadataKey + o.tokenSource = tokenSource + + return nil +} + +func (o *OauthMetadataProvider) GetOauthMetadata(cfg *Config, tokenCache cache.TokenCache, proxyCredentialsFuture *PerRPCCredentialsFuture) error { + // Ensure loadTokenRelated() is only executed once + var err error + o.once.Do(func() { + err = o.getTokenSourceAndMetadata(cfg, tokenCache, proxyCredentialsFuture) + if err != nil { + logger.Errorf(context.Background(), "Failed to load token related config. Error: %v", err) + } + }) + if err != nil { + return err + } + return nil +} + // NewAuthInterceptor creates a new grpc.UnaryClientInterceptor that forwards the grpc call and inspects the error. // It will first invoke the grpc pipeline (to proceed with the request) with no modifications. It's expected for the grpc // pipeline to already have a grpc.WithPerRPCCredentials() DialOption. If the perRPCCredentials has already been initialized, @@ -138,13 +170,26 @@ func setHTTPClientContext(ctx context.Context, cfg *Config, proxyCredentialsFutu // a token source has been created, it'll invoke the grpc pipeline again, this time the grpc.PerRPCCredentials should // be able to find and acquire a valid AccessToken to annotate the request with. func NewAuthInterceptor(cfg *Config, tokenCache cache.TokenCache, credentialsFuture *PerRPCCredentialsFuture, proxyCredentialsFuture *PerRPCCredentialsFuture) grpc.UnaryClientInterceptor { + + oauthMetadataProvider := OauthMetadataProvider{ + once: sync.Once{}, + } + return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + ctx = setHTTPClientContext(ctx, cfg, proxyCredentialsFuture) // If there is already a token in the cache (e.g. key-ring), we should use it immediately... 
t, _ := tokenCache.GetToken() if t != nil { - err := MaterializeCredentials(ctx, cfg, tokenCache, credentialsFuture, proxyCredentialsFuture) + err := oauthMetadataProvider.GetOauthMetadata(cfg, tokenCache, proxyCredentialsFuture) + if err != nil { + return err + } + authorizationMetadataKey := oauthMetadataProvider.authorizationMetadataKey + tokenSource := oauthMetadataProvider.tokenSource + + err = MaterializeCredentials(tokenSource, cfg, authorizationMetadataKey, credentialsFuture) if err != nil { return fmt.Errorf("failed to materialize credentials. Error: %v", err) } @@ -157,6 +202,13 @@ func NewAuthInterceptor(cfg *Config, tokenCache cache.TokenCache, credentialsFut if st, ok := status.FromError(err); ok { // If the error we receive from executing the request expects if shouldAttemptToAuthenticate(st.Code()) { + err := oauthMetadataProvider.GetOauthMetadata(cfg, tokenCache, proxyCredentialsFuture) + if err != nil { + return err + } + authorizationMetadataKey := oauthMetadataProvider.authorizationMetadataKey + tokenSource := oauthMetadataProvider.tokenSource + err = func() error { if !tokenCache.TryLock() { tokenCache.CondWait() @@ -171,7 +223,7 @@ func NewAuthInterceptor(cfg *Config, tokenCache cache.TokenCache, credentialsFut } logger.Debugf(ctx, "Request failed due to [%v]. Attempting to establish an authenticated connection and trying again.", st.Code()) - newErr := MaterializeCredentials(ctx, cfg, tokenCache, credentialsFuture, proxyCredentialsFuture) + newErr := MaterializeCredentials(tokenSource, cfg, authorizationMetadataKey, credentialsFuture) if newErr != nil { errString := fmt.Sprintf("authentication error! Original Error: %v, Auth Error: %v", err, newErr) logger.Errorf(ctx, errString) diff --git a/flyteidl/clients/go/admin/auth_interceptor_test.go b/flyteidl/clients/go/admin/auth_interceptor_test.go index 10c96625b7..0f47e97b9c 100644 --- a/flyteidl/clients/go/admin/auth_interceptor_test.go +++ b/flyteidl/clients/go/admin/auth_interceptor_test.go @@ -24,6 +24,7 @@ import ( "github.com/flyteorg/flyte/flyteidl/clients/go/admin/cache/mocks" adminMocks "github.com/flyteorg/flyte/flyteidl/clients/go/admin/mocks" + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/service" "github.com/flyteorg/flyte/flytestdlib/config" "github.com/flyteorg/flyte/flytestdlib/logger" @@ -141,11 +142,34 @@ func Test_newAuthInterceptor(t *testing.T) { err := json.Unmarshal(plan, &tokenData) assert.NoError(t, err) t.Run("Other Error", func(t *testing.T) { + ctx := context.Background() + httpPort := rand.IntnRange(10000, 60000) + grpcPort := rand.IntnRange(10000, 60000) + m := &adminMocks.AuthMetadataServiceServer{} + m.OnGetOAuth2MetadataMatch(mock.Anything, mock.Anything).Return(&service.OAuth2MetadataResponse{ + AuthorizationEndpoint: fmt.Sprintf("http://localhost:%d/oauth2/authorize", httpPort), + TokenEndpoint: fmt.Sprintf("http://localhost:%d/oauth2/token", httpPort), + JwksUri: fmt.Sprintf("http://localhost:%d/oauth2/jwks", httpPort), + }, nil) + + m.OnGetPublicClientConfigMatch(mock.Anything, mock.Anything).Return(&service.PublicClientAuthConfigResponse{ + Scopes: []string{"all"}, + }, nil) + + s := newAuthMetadataServer(t, grpcPort, httpPort, m) + assert.NoError(t, s.Start(ctx)) + defer s.Close() + u, err := url.Parse(fmt.Sprintf("dns:///localhost:%d", grpcPort)) + assert.NoError(t, err) f := NewPerRPCCredentialsFuture() p := NewPerRPCCredentialsFuture() mockTokenCache := &mocks.TokenCache{} mockTokenCache.OnGetTokenMatch().Return(&tokenData, nil) - interceptor := 
NewAuthInterceptor(&Config{}, mockTokenCache, f, p) + mockTokenCache.OnSaveTokenMatch(mock.Anything).Return(nil) + interceptor := NewAuthInterceptor(&Config{ + Endpoint: config.URL{URL: *u}, + UseInsecureConnection: true, + }, mockTokenCache, f, p) otherError := func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, opts ...grpc.CallOption) error { return status.New(codes.Canceled, "").Err() } @@ -209,6 +233,14 @@ func Test_newAuthInterceptor(t *testing.T) { httpPort := rand.IntnRange(10000, 60000) grpcPort := rand.IntnRange(10000, 60000) m := &adminMocks.AuthMetadataServiceServer{} + m.OnGetOAuth2MetadataMatch(mock.Anything, mock.Anything).Return(&service.OAuth2MetadataResponse{ + AuthorizationEndpoint: fmt.Sprintf("http://localhost:%d/oauth2/authorize", httpPort), + TokenEndpoint: fmt.Sprintf("http://localhost:%d/oauth2/token", httpPort), + JwksUri: fmt.Sprintf("http://localhost:%d/oauth2/jwks", httpPort), + }, nil) + m.OnGetPublicClientConfigMatch(mock.Anything, mock.Anything).Return(&service.PublicClientAuthConfigResponse{ + Scopes: []string{"all"}, + }, nil) s := newAuthMetadataServer(t, grpcPort, httpPort, m) ctx := context.Background() assert.NoError(t, s.Start(ctx)) @@ -283,12 +315,13 @@ func Test_newAuthInterceptor(t *testing.T) { }) } -func TestMaterializeCredentials(t *testing.T) { +func TestNewAuthInterceptorAndMaterialize(t *testing.T) { t.Run("No oauth2 metadata endpoint or Public client config lookup", func(t *testing.T) { httpPort := rand.IntnRange(10000, 60000) grpcPort := rand.IntnRange(10000, 60000) + fakeToken := &oauth2.Token{} c := &mocks.TokenCache{} - c.OnGetTokenMatch().Return(nil, nil) + c.OnGetTokenMatch().Return(fakeToken, nil) c.OnSaveTokenMatch(mock.Anything).Return(nil) m := &adminMocks.AuthMetadataServiceServer{} m.OnGetOAuth2MetadataMatch(mock.Anything, mock.Anything).Return(nil, errors.New("unexpected call to get oauth2 metadata")) @@ -304,7 +337,7 @@ func TestMaterializeCredentials(t *testing.T) { f := NewPerRPCCredentialsFuture() p := NewPerRPCCredentialsFuture() - err = MaterializeCredentials(ctx, &Config{ + cfg := &Config{ Endpoint: config.URL{URL: *u}, UseInsecureConnection: true, AuthType: AuthTypeClientSecret, @@ -312,14 +345,22 @@ func TestMaterializeCredentials(t *testing.T) { Scopes: []string{"all"}, Audience: "http://localhost:30081", AuthorizationHeader: "authorization", - }, c, f, p) + } + + intercept := NewAuthInterceptor(cfg, c, f, p) + // Invoke Materialize inside the intercept + err = intercept(ctx, "GET", nil, nil, nil, func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, opts ...grpc.CallOption) error { + return nil + }) assert.NoError(t, err) }) + t.Run("Failed to fetch client metadata", func(t *testing.T) { httpPort := rand.IntnRange(10000, 60000) grpcPort := rand.IntnRange(10000, 60000) c := &mocks.TokenCache{} - c.OnGetTokenMatch().Return(nil, nil) + fakeToken := &oauth2.Token{} + c.OnGetTokenMatch().Return(fakeToken, nil) c.OnSaveTokenMatch(mock.Anything).Return(nil) m := &adminMocks.AuthMetadataServiceServer{} m.OnGetOAuth2MetadataMatch(mock.Anything, mock.Anything).Return(nil, errors.New("unexpected call to get oauth2 metadata")) @@ -333,17 +374,44 @@ func TestMaterializeCredentials(t *testing.T) { u, err := url.Parse(fmt.Sprintf("dns:///localhost:%d", grpcPort)) assert.NoError(t, err) + cfg := &Config{ + Endpoint: config.URL{URL: *u}, + UseInsecureConnection: true, + AuthType: AuthTypeClientSecret, + TokenURL: fmt.Sprintf("http://localhost:%d/api/v1/token", 
httpPort), + Scopes: []string{"all"}, + } f := NewPerRPCCredentialsFuture() p := NewPerRPCCredentialsFuture() + intercept := NewAuthInterceptor(cfg, c, f, p) + err = intercept(ctx, "GET", nil, nil, nil, func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, opts ...grpc.CallOption) error { + return nil + }) + assert.EqualError(t, err, "failed to fetch client metadata. Error: rpc error: code = Unknown desc = expected err") + }) +} + +func TestSimpleMaterializeCredentials(t *testing.T) { + t.Run("simple materialize", func(t *testing.T) { + httpPort := rand.IntnRange(10000, 60000) + grpcPort := rand.IntnRange(10000, 60000) + u, err := url.Parse(fmt.Sprintf("dns:///localhost:%d", grpcPort)) + assert.NoError(t, err) + + f := NewPerRPCCredentialsFuture() - err = MaterializeCredentials(ctx, &Config{ + dummySource := DummyTestTokenSource{} + + err = MaterializeCredentials(dummySource, &Config{ Endpoint: config.URL{URL: *u}, UseInsecureConnection: true, AuthType: AuthTypeClientSecret, - TokenURL: fmt.Sprintf("http://localhost:%d/api/v1/token", httpPort), + TokenURL: fmt.Sprintf("http://localhost:%d/oauth2/token", httpPort), Scopes: []string{"all"}, - }, c, f, p) - assert.EqualError(t, err, "failed to fetch client metadata. Error: rpc error: code = Unknown desc = expected err") + Audience: "http://localhost:30081", + AuthorizationHeader: "authorization", + }, "authorization", f) + assert.NoError(t, err) }) } diff --git a/flyteidl/clients/go/admin/client.go b/flyteidl/clients/go/admin/client.go index 9758bd9dec..757f25b160 100644 --- a/flyteidl/clients/go/admin/client.go +++ b/flyteidl/clients/go/admin/client.go @@ -179,8 +179,9 @@ func initializeClients(ctx context.Context, cfg *Config, tokenCache cache.TokenC credentialsFuture := NewPerRPCCredentialsFuture() proxyCredentialsFuture := NewPerRPCCredentialsFuture() + authInterceptor := NewAuthInterceptor(cfg, tokenCache, credentialsFuture, proxyCredentialsFuture) opts = append(opts, - grpc.WithChainUnaryInterceptor(NewAuthInterceptor(cfg, tokenCache, credentialsFuture, proxyCredentialsFuture)), + grpc.WithChainUnaryInterceptor(authInterceptor), grpc.WithPerRPCCredentials(credentialsFuture)) if cfg.DefaultServiceConfig != "" { diff --git a/flyteidl/gen/pb_rust/datacatalog.rs b/flyteidl/gen/pb_rust/datacatalog.rs index f181704954..b49cab340c 100644 --- a/flyteidl/gen/pb_rust/datacatalog.rs +++ b/flyteidl/gen/pb_rust/datacatalog.rs @@ -1,4 +1,5 @@ // @generated +// This file is @generated by prost-build. /// /// Request message for creating a Dataset. #[allow(clippy::derive_partial_eq_without_eq)] @@ -10,7 +11,7 @@ pub struct CreateDatasetRequest { /// /// Response message for creating a Dataset #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct CreateDatasetResponse { } /// @@ -74,7 +75,7 @@ pub struct CreateArtifactRequest { /// /// Response message for creating an Artifact. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct CreateArtifactResponse { } /// @@ -88,7 +89,7 @@ pub struct AddTagRequest { /// /// Response message for tagging an Artifact. 
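Stepping back from the generated bindings for a moment: the client.go hunk above names the interceptor before wiring it into the dial options. Functionally the wiring is unchanged — gRPC holds onto a single interceptor value per client — and that is precisely what makes the per-call lookup avoidable: state captured in the interceptor's closure (here, the once-guarded `OauthMetadataProvider`) survives across RPCs. A minimal sketch of why closure state persists across calls; `newCountingInterceptor` is illustrative, not the Flyte API:

```go
package main

import "fmt"

// newCountingInterceptor returns a func whose captured counter is shared by
// every invocation — the same reason a single auth interceptor instance can
// reuse its cached token source on each RPC.
func newCountingInterceptor() func(method string) {
	calls := 0 // captured state, shared by all calls through this instance
	return func(method string) {
		calls++
		fmt.Printf("call %d: %s\n", calls, method)
	}
}

func main() {
	intercept := newCountingInterceptor() // construct once, as in client.go
	intercept("GetTask")                  // call 1
	intercept("ListTasks")                // call 2: sees the same state

	// Constructing a fresh instance per call would reset the state each time:
	newCountingInterceptor()("GetTask") // call 1 again
}
```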
#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct AddTagResponse { } /// List the artifacts that belong to the Dataset, optionally filtered using filtered expression. @@ -245,7 +246,7 @@ pub struct ReleaseReservationRequest { } /// Response to release reservation #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct ReleaseReservationResponse { } /// diff --git a/flyteidl/gen/pb_rust/flyteidl.admin.rs b/flyteidl/gen/pb_rust/flyteidl.admin.rs index ca3270264b..30f39ab45d 100644 --- a/flyteidl/gen/pb_rust/flyteidl.admin.rs +++ b/flyteidl/gen/pb_rust/flyteidl.admin.rs @@ -1,4 +1,5 @@ // @generated +// This file is @generated by prost-build. /// Represents a subset of runtime task execution metadata that are relevant to external plugins. /// /// ID of the task execution @@ -194,7 +195,7 @@ pub struct DeleteTaskRequest { } /// Response to delete a task. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct DeleteTaskResponse { } /// A message containing the agent metadata. @@ -246,7 +247,7 @@ pub struct GetAgentResponse { } /// A request to list all agents. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct ListAgentsRequest { } /// A response containing a list of agents. @@ -608,7 +609,7 @@ pub struct NamedEntityUpdateRequest { } /// Purposefully empty, may be populated in the future. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct NamedEntityUpdateResponse { } /// Shared request structure to fetch a single resource. @@ -1003,7 +1004,7 @@ pub struct WorkflowExecutionEventRequest { } /// Purposefully empty, may be populated in the future. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct WorkflowExecutionEventResponse { } /// Request to send a notification that a node execution event has occurred. @@ -1019,7 +1020,7 @@ pub struct NodeExecutionEventRequest { } /// Purposefully empty, may be populated in the future. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct NodeExecutionEventResponse { } /// Request to send a notification that a task execution event has occurred. @@ -1035,7 +1036,7 @@ pub struct TaskExecutionEventRequest { } /// Purposefully empty, may be populated in the future. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct TaskExecutionEventResponse { } /// Defines a set of overridable task resource attributes set during task registration. @@ -1717,7 +1718,7 @@ pub struct ExecutionTerminateRequest { } /// Purposefully empty, may be populated in the future. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct ExecutionTerminateResponse { } /// Request structure to fetch inputs, output and other data produced by an execution. 
@@ -1774,7 +1775,7 @@ pub struct ExecutionStateChangeDetails { pub principal: ::prost::alloc::string::String, } #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct ExecutionUpdateResponse { } /// WorkflowExecutionGetMetricsRequest represents a request to retrieve metrics for the specified workflow execution. @@ -1828,7 +1829,7 @@ impl ExecutionState { } /// Option for schedules run at a certain frequency e.g. every 2 minutes. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct FixedRate { #[prost(uint32, tag="1")] pub value: u32, @@ -1919,7 +1920,7 @@ pub struct LaunchPlanCreateRequest { } /// Purposefully empty, may be populated in the future. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct LaunchPlanCreateResponse { } /// A LaunchPlan provides the capability to templatize workflow executions. @@ -2084,7 +2085,7 @@ pub struct LaunchPlanUpdateRequest { } /// Purposefully empty, may be populated in the future. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct LaunchPlanUpdateResponse { } /// Represents a request struct for finding an active launch plan for a given NamedEntityIdentifier @@ -2460,7 +2461,7 @@ pub struct EmailMessage { } /// Empty request for GetDomain #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct GetDomainRequest { } /// Namespace within a project commonly used to differentiate between different service instances. @@ -2596,12 +2597,12 @@ pub struct ProjectRegisterRequest { } /// Purposefully empty, may be updated in the future. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct ProjectRegisterResponse { } /// Purposefully empty, may be updated in the future. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct ProjectUpdateResponse { } #[allow(clippy::derive_partial_eq_without_eq)] @@ -2652,7 +2653,7 @@ pub struct ProjectAttributesUpdateRequest { } /// Purposefully empty, may be populated in the future. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct ProjectAttributesUpdateResponse { } /// Request to get an individual project level attribute override. @@ -2699,7 +2700,7 @@ pub struct ProjectAttributesDeleteRequest { } /// Purposefully empty, may be populated in the future. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct ProjectAttributesDeleteResponse { } /// Defines a set of custom matching attributes which defines resource defaults for a project and domain. @@ -2730,7 +2731,7 @@ pub struct ProjectDomainAttributesUpdateRequest { } /// Purposefully empty, may be populated in the future. 
#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct ProjectDomainAttributesUpdateResponse { } /// Request to get an individual project domain attribute override. @@ -2785,7 +2786,7 @@ pub struct ProjectDomainAttributesDeleteRequest { } /// Purposefully empty, may be populated in the future. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct ProjectDomainAttributesDeleteResponse { } /// SignalGetOrCreateRequest represents a request structure to retrieve or create a signal. @@ -2857,7 +2858,7 @@ pub struct SignalSetRequest { /// /// Purposefully empty, may be populated in the future. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct SignalSetResponse { } /// Signal encapsulates a unique identifier, associated metadata, and a value for a single Flyte @@ -2895,7 +2896,7 @@ pub struct TaskCreateRequest { /// /// Purposefully empty, may be populated in the future. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct TaskCreateResponse { } /// Flyte workflows are composed of many ordered tasks. That is small, reusable, self-contained logical blocks @@ -3155,7 +3156,7 @@ pub struct Version { } /// Empty request for GetVersion #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct GetVersionRequest { } /// Represents a request structure to create a revision of a workflow. @@ -3174,7 +3175,7 @@ pub struct WorkflowCreateRequest { } /// Purposefully empty, may be populated in the future. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct WorkflowCreateResponse { } /// Represents the workflow structure stored in the Admin @@ -3295,7 +3296,7 @@ pub struct WorkflowAttributesUpdateRequest { } /// Purposefully empty, may be populated in the future. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct WorkflowAttributesUpdateResponse { } /// Request to get an individual workflow attribute override. @@ -3357,7 +3358,7 @@ pub struct WorkflowAttributesDeleteRequest { } /// Purposefully empty, may be populated in the future. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct WorkflowAttributesDeleteResponse { } // @@protoc_insertion_point(module) diff --git a/flyteidl/gen/pb_rust/flyteidl.cacheservice.rs b/flyteidl/gen/pb_rust/flyteidl.cacheservice.rs index ff3264c633..d63e4d31bf 100644 --- a/flyteidl/gen/pb_rust/flyteidl.cacheservice.rs +++ b/flyteidl/gen/pb_rust/flyteidl.cacheservice.rs @@ -1,4 +1,5 @@ // @generated +// This file is @generated by prost-build. 
/// /// Additional metadata as key-value pairs #[allow(clippy::derive_partial_eq_without_eq)] @@ -88,7 +89,7 @@ pub struct PutCacheRequest { /// /// Empty, success indicated by no errors #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct PutCacheResponse { } /// @@ -105,7 +106,7 @@ pub struct DeleteCacheRequest { /// /// Empty, success indicated by no errors #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct DeleteCacheResponse { } /// A reservation including owner, heartbeat interval, expiration timestamp, and various metadata. @@ -166,7 +167,7 @@ pub struct ReleaseReservationRequest { /// /// Empty, success indicated by no errors #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct ReleaseReservationResponse { } include!("flyteidl.cacheservice.tonic.rs"); diff --git a/flyteidl/gen/pb_rust/flyteidl.core.rs b/flyteidl/gen/pb_rust/flyteidl.core.rs index 0876c70d6f..f2b73c9b11 100644 --- a/flyteidl/gen/pb_rust/flyteidl.core.rs +++ b/flyteidl/gen/pb_rust/flyteidl.core.rs @@ -1,4 +1,5 @@ // @generated +// This file is @generated by prost-build. /// Defines schema columns and types to strongly type-validate schemas interoperability. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -375,7 +376,7 @@ pub mod primitive { /// Used to denote a nil/null/None assignment to a scalar value. The underlying LiteralType for Void is intentionally /// undefined since it can be assigned to a scalar of any LiteralType. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct Void { } /// Refers to an offloaded set of files. It encapsulates the type of the store and a unique uri for where the data is. @@ -600,7 +601,7 @@ pub struct KeyValuePair { } /// Retry strategy associated with an executable unit. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct RetryStrategy { /// Number of retries. Retries will be consumed when the job fails with a recoverable error. /// The number of retries must be less than or equals to 10. @@ -770,7 +771,7 @@ pub struct InputBindingData { pub var: ::prost::alloc::string::String, } #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct RuntimeBinding { } #[allow(clippy::derive_partial_eq_without_eq)] @@ -1390,7 +1391,7 @@ pub mod task_metadata { /// Identify whether task is interruptible #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Oneof)] +#[derive(Clone, Copy, PartialEq, ::prost::Oneof)] pub enum InterruptibleValue { #[prost(bool, tag="8")] Interruptible(bool), @@ -1460,7 +1461,7 @@ pub mod task_template { /// Defines port properties for a container. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct ContainerPort { /// Number of port to expose on the pod's IP address. /// This must be a valid port number, 0 < x < 65536. 
@@ -1551,7 +1552,7 @@ pub mod container { } /// Strategy to use when dealing with Blob, Schema, or multipart blob data (large datasets) #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct IoStrategy { /// Mode to use to manage downloads #[prost(enumeration="io_strategy::DownloadMode", tag="1")] @@ -1926,7 +1927,7 @@ pub mod conjunction_expression { } /// Indicates various phases of Workflow Execution #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct WorkflowExecution { } /// Nested message and enum types in `WorkflowExecution`. @@ -1984,7 +1985,7 @@ pub mod workflow_execution { } /// Indicates various phases of Node Execution that only include the time spent to run the nodes/workflows #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct NodeExecution { } /// Nested message and enum types in `NodeExecution`. @@ -2046,7 +2047,7 @@ pub mod node_execution { /// Phases that task plugins can go through. Not all phases may be applicable to a specific plugin task, /// but this is the cumulative list that customers may want to know about for their task. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct TaskExecution { } /// Nested message and enum types in `TaskExecution`. @@ -2200,7 +2201,7 @@ pub mod task_log { } /// Represents customized execution run-time attributes. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct QualityOfServiceSpec { /// Indicates how much queueing delay an execution can tolerate. #[prost(message, optional, tag="1")] @@ -2208,7 +2209,7 @@ pub struct QualityOfServiceSpec { } /// Indicates the priority of an execution. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct QualityOfService { #[prost(oneof="quality_of_service::Designation", tags="1, 2")] pub designation: ::core::option::Option, @@ -2249,7 +2250,7 @@ pub mod quality_of_service { } } #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Oneof)] +#[derive(Clone, Copy, PartialEq, ::prost::Oneof)] pub enum Designation { #[prost(enumeration="Tier", tag="1")] Tier(i32), @@ -2369,7 +2370,7 @@ pub struct SignalCondition { } /// SleepCondition represents a dependency on waiting for the specified duration. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct SleepCondition { /// The overall duration for this sleep. #[prost(message, optional, tag="1")] @@ -2448,7 +2449,7 @@ pub mod array_node { } } #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Oneof)] +#[derive(Clone, Copy, PartialEq, ::prost::Oneof)] pub enum ParallelismOption { /// parallelism defines the minimum number of instances to bring up concurrently at any given /// point. 
Note that this is an optimistic restriction and that, due to network partitioning or @@ -2458,7 +2459,7 @@ pub mod array_node { Parallelism(u32), } #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Oneof)] +#[derive(Clone, Copy, PartialEq, ::prost::Oneof)] pub enum SuccessCriteria { /// min_successes is an absolute number of the minimum number of successful completions of /// sub-nodes. As soon as this criteria is met, the ArrayNode will be marked as successful @@ -2502,14 +2503,14 @@ pub struct NodeMetadata { pub mod node_metadata { /// Identify whether node is interruptible #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Oneof)] +#[derive(Clone, Copy, PartialEq, ::prost::Oneof)] pub enum InterruptibleValue { #[prost(bool, tag="6")] Interruptible(bool), } /// Identify whether a node should have it's outputs cached. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Oneof)] +#[derive(Clone, Copy, PartialEq, ::prost::Oneof)] pub enum CacheableValue { #[prost(bool, tag="7")] Cacheable(bool), @@ -2523,7 +2524,7 @@ pub mod node_metadata { } /// Identify whether caching operations involving this node should be serialized. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Oneof)] +#[derive(Clone, Copy, PartialEq, ::prost::Oneof)] pub enum CacheSerializableValue { #[prost(bool, tag="9")] CacheSerializable(bool), @@ -2653,7 +2654,7 @@ pub mod workflow_metadata { /// If you are adding a setting that applies to both the Workflow itself, and everything underneath it, it should be /// added to both this object and the WorkflowMetadata object above. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct WorkflowMetadataDefaults { /// Whether child nodes of the workflow are interruptible. #[prost(bool, tag="1")] @@ -2884,7 +2885,7 @@ pub mod catalog_metadata { } } #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct CatalogReservation { } /// Nested message and enum types in `CatalogReservation`. diff --git a/flyteidl/gen/pb_rust/flyteidl.event.rs b/flyteidl/gen/pb_rust/flyteidl.event.rs index 281ee07daa..80a8a11442 100644 --- a/flyteidl/gen/pb_rust/flyteidl.event.rs +++ b/flyteidl/gen/pb_rust/flyteidl.event.rs @@ -1,4 +1,5 @@ // @generated +// This file is @generated by prost-build. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct WorkflowExecutionEvent { diff --git a/flyteidl/gen/pb_rust/flyteidl.plugins.kubeflow.rs b/flyteidl/gen/pb_rust/flyteidl.plugins.kubeflow.rs index 9eebb7bc9e..b2a4d69f57 100644 --- a/flyteidl/gen/pb_rust/flyteidl.plugins.kubeflow.rs +++ b/flyteidl/gen/pb_rust/flyteidl.plugins.kubeflow.rs @@ -1,6 +1,7 @@ // @generated +// This file is @generated by prost-build. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct RunPolicy { /// Defines the policy to kill pods after the job completes. Default to None. 
#[prost(enumeration="CleanPodPolicy", tag="1")] diff --git a/flyteidl/gen/pb_rust/flyteidl.plugins.rs b/flyteidl/gen/pb_rust/flyteidl.plugins.rs index 28c2f77e97..0252c9d882 100644 --- a/flyteidl/gen/pb_rust/flyteidl.plugins.rs +++ b/flyteidl/gen/pb_rust/flyteidl.plugins.rs @@ -1,8 +1,9 @@ // @generated +// This file is @generated by prost-build. /// Describes a job that can process independent pieces of data concurrently. Multiple copies of the runnable component /// will be executed concurrently. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct ArrayJob { /// Defines the maximum number of instances to bring up concurrently at any given point. Note that this is an /// optimistic restriction and that, due to network partitioning or other failures, the actual number of currently @@ -20,7 +21,7 @@ pub struct ArrayJob { /// Nested message and enum types in `ArrayJob`. pub mod array_job { #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Oneof)] +#[derive(Clone, Copy, PartialEq, ::prost::Oneof)] pub enum SuccessCriteria { /// An absolute number of the minimum number of successful completions of subtasks. As soon as this criteria is met, /// the array job will be marked as successful and outputs will be computed. This has to be a non-negative number if @@ -120,7 +121,7 @@ pub struct DaskWorkerGroup { /// MPI operator proposal /// Custom proto for plugin that enables distributed training using #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct DistributedMpiTrainingTask { /// number of worker spawned in the cluster for this job #[prost(int32, tag="1")] @@ -277,7 +278,7 @@ pub struct WorkerGroupSpec { pub ray_start_params: ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, } #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct SparkApplication { } /// Nested message and enum types in `SparkApplication`. @@ -347,7 +348,7 @@ pub struct SparkJob { } /// Custom proto for plugin that enables distributed training using #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct DistributedTensorflowTrainingTask { /// number of worker replicas spawned in the cluster for this job #[prost(int32, tag="1")] diff --git a/flyteidl/gen/pb_rust/flyteidl.service.rs b/flyteidl/gen/pb_rust/flyteidl.service.rs index 2fb065da4e..8c5a33de9a 100644 --- a/flyteidl/gen/pb_rust/flyteidl.service.rs +++ b/flyteidl/gen/pb_rust/flyteidl.service.rs @@ -1,6 +1,7 @@ // @generated +// This file is @generated by prost-build. 
#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct OAuth2MetadataRequest { } /// OAuth2MetadataResponse defines an RFC-Compliant response for /.well-known/oauth-authorization-server metadata @@ -44,7 +45,7 @@ pub struct OAuth2MetadataResponse { pub device_authorization_endpoint: ::prost::alloc::string::String, } #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct PublicClientAuthConfigRequest { } /// FlyteClientResponse encapsulates public information that flyte clients (CLIs... etc.) can use to authenticate users. @@ -335,7 +336,7 @@ pub struct TaskDeleteRequest { } /// Response to delete a task. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct TaskDeleteResponse { } /// The state of the execution is used to control its visibility in the UI/CLI. @@ -375,7 +376,7 @@ impl State { } } #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct UserInfoRequest { } /// See the OpenID Connect spec at for more information. From d2614d416cd2565dc6a91f0ccff63f7a0dbdf970 Mon Sep 17 00:00:00 2001 From: Flyte Bot Date: Fri, 23 Aug 2024 16:07:24 -0700 Subject: [PATCH 7/8] Update Flyte components - v1.13.1-rc1 (#5691) * Update Flyte Components Signed-off-by: Flyte-Bot * Add changelog and bump version in conf.py Signed-off-by: Eduardo Apolinario --------- Signed-off-by: Flyte-Bot Signed-off-by: Eduardo Apolinario Co-authored-by: eapolinario --- CHANGELOG/CHANGELOG-v1.13.1-rc1.md | 35 ++++++++++++++++++ charts/flyte-binary/README.md | 2 +- charts/flyte-binary/values.yaml | 2 +- charts/flyte-core/README.md | 12 +++---- charts/flyte-core/values.yaml | 10 +++--- charts/flyte/README.md | 16 ++++----- charts/flyte/values.yaml | 10 +++--- charts/flyteagent/README.md | 2 +- charts/flyteagent/values.yaml | 2 +- .../agent/flyte_agent_helm_generated.yaml | 2 +- .../flyte_aws_scheduler_helm_generated.yaml | 30 ++++++++-------- .../flyte_helm_controlplane_generated.yaml | 20 +++++------ .../eks/flyte_helm_dataplane_generated.yaml | 14 ++++---- deployment/eks/flyte_helm_generated.yaml | 34 +++++++++--------- .../flyte_helm_controlplane_generated.yaml | 20 +++++------ .../gcp/flyte_helm_dataplane_generated.yaml | 14 ++++---- deployment/gcp/flyte_helm_generated.yaml | 34 +++++++++--------- .../flyte_sandbox_binary_helm_generated.yaml | 4 +-- deployment/sandbox/flyte_helm_generated.yaml | 34 +++++++++--------- .../manifests/complete-agent.yaml | 10 +++--- .../sandbox-bundled/manifests/complete.yaml | 8 ++--- docker/sandbox-bundled/manifests/dev.yaml | 4 +-- docs/conf.py | 2 +- .../generated/flyteadmin_config.rst | 36 +++++++++++++++++++ .../generated/flytepropeller_config.rst | 36 +++++++++++++++++++ .../generated/scheduler_config.rst | 36 +++++++++++++++++++ 26 files changed, 286 insertions(+), 143 deletions(-) create mode 100644 CHANGELOG/CHANGELOG-v1.13.1-rc1.md diff --git a/CHANGELOG/CHANGELOG-v1.13.1-rc1.md b/CHANGELOG/CHANGELOG-v1.13.1-rc1.md new file mode 100644 index 0000000000..b9b3099f39 --- /dev/null +++ b/CHANGELOG/CHANGELOG-v1.13.1-rc1.md @@ -0,0 +1,35 @@ +# Flyte v1.13.1-rc1 Release Notes + +## What's Changed +* Add CustomHeaderMatcher to pass additional headers by @andrewwdye in 
https://github.com/flyteorg/flyte/pull/5563 +* Turn flyteidl and flytectl releases into manual gh workflows by @eapolinario in https://github.com/flyteorg/flyte/pull/5635 +* docs: fix typo by @cratiu222 in https://github.com/flyteorg/flyte/pull/5643 +* Use enable_deck=True in docs by @thomasjpfan in https://github.com/flyteorg/flyte/pull/5645 +* Fix flyteidl release checkout all tags by @eapolinario in https://github.com/flyteorg/flyte/pull/5646 +* Install pyarrow in sandbox functional tests by @eapolinario in https://github.com/flyteorg/flyte/pull/5647 +* docs: add documentation for configuring notifications in GCP by @desihsu in https://github.com/flyteorg/flyte/pull/5545 +* Correct "sucessfile" to "successfile" by @shengyu7697 in https://github.com/flyteorg/flyte/pull/5652 +* Fix ordering for custom template values in cluster resource controller by @katrogan in https://github.com/flyteorg/flyte/pull/5648 +* Don't error when attempting to trigger schedules for inactive projects by @katrogan in https://github.com/flyteorg/flyte/pull/5649 +* Update Flyte components - v1.13.1-rc0 by @flyte-bot in https://github.com/flyteorg/flyte/pull/5656 +* Add offloaded path to literal by @katrogan in https://github.com/flyteorg/flyte/pull/5660 +* Improve error messaging for invalid arguments by @pingsutw in https://github.com/flyteorg/flyte/pull/5658 +* DOC-462 Update "Try Flyte in the browser" text by @neverett in https://github.com/flyteorg/flyte/pull/5654 +* DOC-533 Remove outdated duplicate notification config content by @neverett in https://github.com/flyteorg/flyte/pull/5672 +* Validate labels before creating flyte CRD by @pingsutw in https://github.com/flyteorg/flyte/pull/5671 +* Add FLYTE_INTERNAL_POD_NAME environment variable that holds the pod name by @bgedik in https://github.com/flyteorg/flyte/pull/5616 +* Upstream Using InMemory token cache for admin clientset in propeller by @pvditt in https://github.com/flyteorg/flyte/pull/5621 +* [Bug] Update resource failures w/ Finalizers set (#423) by @pvditt in https://github.com/flyteorg/flyte/pull/5673 +* [BUG] array node eventing bump version by @pvditt in https://github.com/flyteorg/flyte/pull/5680 +* Add custominfo to agents by @ddl-rliu in https://github.com/flyteorg/flyte/pull/5604 +* [BUG] use deep copy of bit arrays when getting array node state by @pvditt in https://github.com/flyteorg/flyte/pull/5681 +* More concise definition of launchplan by @eapolinario in https://github.com/flyteorg/flyte/pull/5682 +* Auth/prevent lookup per call by @wild-endeavor in https://github.com/flyteorg/flyte/pull/5686 + +## New Contributors +* @cratiu222 made their first contribution in https://github.com/flyteorg/flyte/pull/5643 +* @desihsu made their first contribution in https://github.com/flyteorg/flyte/pull/5545 +* @shengyu7697 made their first contribution in https://github.com/flyteorg/flyte/pull/5652 +* @bgedik made their first contribution in https://github.com/flyteorg/flyte/pull/5616 + +**Full Changelog**: https://github.com/flyteorg/flyte/compare/flytectl/v0.9.1...v1.13.1-rc1 diff --git a/charts/flyte-binary/README.md b/charts/flyte-binary/README.md index 64feee5d89..350391fd53 100644 --- a/charts/flyte-binary/README.md +++ b/charts/flyte-binary/README.md @@ -42,7 +42,7 @@ Chart for basic single Flyte executable deployment | configuration.auth.oidc.clientId | string | `""` | | | configuration.auth.oidc.clientSecret | string | `""` | | | configuration.co-pilot.image.repository | string | `"cr.flyte.org/flyteorg/flytecopilot"` | | -| 
configuration.co-pilot.image.tag | string | `"v1.13.1-rc0"` | | +| configuration.co-pilot.image.tag | string | `"v1.13.1-rc1"` | | | configuration.database.dbname | string | `"flyte"` | | | configuration.database.host | string | `"127.0.0.1"` | | | configuration.database.options | string | `"sslmode=disable"` | | diff --git a/charts/flyte-binary/values.yaml b/charts/flyte-binary/values.yaml index b3ea6877f7..8821126da2 100644 --- a/charts/flyte-binary/values.yaml +++ b/charts/flyte-binary/values.yaml @@ -159,7 +159,7 @@ configuration: # repository CoPilot sidecar image repository repository: cr.flyte.org/flyteorg/flytecopilot # FLYTECOPILOT_IMAGE # tag CoPilot sidecar image tag - tag: v1.13.1-rc0 # FLYTECOPILOT_TAG + tag: v1.13.1-rc1 # FLYTECOPILOT_TAG # agentService Flyte Agent configuration agentService: defaultAgent: diff --git a/charts/flyte-core/README.md b/charts/flyte-core/README.md index cf4f511a2e..14b938d2cf 100644 --- a/charts/flyte-core/README.md +++ b/charts/flyte-core/README.md @@ -95,8 +95,8 @@ helm install gateway bitnami/contour -n flyte | configmap.clusters.clusterConfigs | list | `[]` | | | configmap.clusters.labelClusterMap | object | `{}` | | | configmap.console | object | `{"BASE_URL":"/console","CONFIG_DIR":"/etc/flyte/config"}` | Configuration for Flyte console UI | -| configmap.copilot | object | `{"plugins":{"k8s":{"co-pilot":{"image":"cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc0","name":"flyte-copilot-","start-timeout":"30s"}}}}` | Copilot configuration | -| configmap.copilot.plugins.k8s.co-pilot | object | `{"image":"cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc0","name":"flyte-copilot-","start-timeout":"30s"}` | Structure documented [here](https://pkg.go.dev/github.com/lyft/flyteplugins@v0.5.28/go/tasks/pluginmachinery/flytek8s/config#FlyteCoPilotConfig) | +| configmap.copilot | object | `{"plugins":{"k8s":{"co-pilot":{"image":"cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc1","name":"flyte-copilot-","start-timeout":"30s"}}}}` | Copilot configuration | +| configmap.copilot.plugins.k8s.co-pilot | object | `{"image":"cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc1","name":"flyte-copilot-","start-timeout":"30s"}` | Structure documented [here](https://pkg.go.dev/github.com/lyft/flyteplugins@v0.5.28/go/tasks/pluginmachinery/flytek8s/config#FlyteCoPilotConfig) | | configmap.core | object | `{"manager":{"pod-application":"flytepropeller","pod-template-container-name":"flytepropeller","pod-template-name":"flytepropeller-template"},"propeller":{"downstream-eval-duration":"30s","enable-admin-launcher":true,"leader-election":{"enabled":true,"lease-duration":"15s","lock-config-map":{"name":"propeller-leader","namespace":"flyte"},"renew-deadline":"10s","retry-period":"2s"},"limit-namespace":"all","max-workflow-retries":30,"metadata-prefix":"metadata/propeller","metrics-prefix":"flyte","prof-port":10254,"queue":{"batch-size":-1,"batching-interval":"2s","queue":{"base-delay":"5s","capacity":1000,"max-delay":"120s","rate":100,"type":"maxof"},"sub-queue":{"capacity":100,"rate":10,"type":"bucket"},"type":"batch"},"rawoutput-prefix":"s3://my-s3-bucket/","workers":4,"workflow-reeval-duration":"30s"},"webhook":{"certDir":"/etc/webhook/certs","serviceName":"flyte-pod-webhook"}}` | Core propeller configuration | | configmap.core.manager | object | `{"pod-application":"flytepropeller","pod-template-container-name":"flytepropeller","pod-template-name":"flytepropeller-template"}` | follows the structure specified 
[here](https://pkg.go.dev/github.com/flyteorg/flytepropeller/manager/config#Config). | | configmap.core.propeller | object | `{"downstream-eval-duration":"30s","enable-admin-launcher":true,"leader-election":{"enabled":true,"lease-duration":"15s","lock-config-map":{"name":"propeller-leader","namespace":"flyte"},"renew-deadline":"10s","retry-period":"2s"},"limit-namespace":"all","max-workflow-retries":30,"metadata-prefix":"metadata/propeller","metrics-prefix":"flyte","prof-port":10254,"queue":{"batch-size":-1,"batching-interval":"2s","queue":{"base-delay":"5s","capacity":1000,"max-delay":"120s","rate":100,"type":"maxof"},"sub-queue":{"capacity":100,"rate":10,"type":"bucket"},"type":"batch"},"rawoutput-prefix":"s3://my-s3-bucket/","workers":4,"workflow-reeval-duration":"30s"}` | follows the structure specified [here](https://pkg.go.dev/github.com/flyteorg/flytepropeller/pkg/controller/config). | @@ -130,7 +130,7 @@ helm install gateway bitnami/contour -n flyte | datacatalog.extraArgs | object | `{}` | Appends extra command line arguments to the main command | | datacatalog.image.pullPolicy | string | `"IfNotPresent"` | Docker image pull policy | | datacatalog.image.repository | string | `"cr.flyte.org/flyteorg/datacatalog"` | Docker image for Datacatalog deployment | -| datacatalog.image.tag | string | `"v1.13.1-rc0"` | Docker image tag | +| datacatalog.image.tag | string | `"v1.13.1-rc1"` | Docker image tag | | datacatalog.nodeSelector | object | `{}` | nodeSelector for Datacatalog deployment | | datacatalog.podAnnotations | object | `{}` | Annotations for Datacatalog pods | | datacatalog.podEnv | object | `{}` | Additional Datacatalog container environment variables | @@ -166,7 +166,7 @@ helm install gateway bitnami/contour -n flyte | flyteadmin.extraArgs | object | `{}` | Appends extra command line arguments to the serve command | | flyteadmin.image.pullPolicy | string | `"IfNotPresent"` | | | flyteadmin.image.repository | string | `"cr.flyte.org/flyteorg/flyteadmin"` | Docker image for Flyteadmin deployment | -| flyteadmin.image.tag | string | `"v1.13.1-rc0"` | | +| flyteadmin.image.tag | string | `"v1.13.1-rc1"` | | | flyteadmin.initialProjects | list | `["flytesnacks","flytetester","flyteexamples"]` | Initial projects to create | | flyteadmin.nodeSelector | object | `{}` | nodeSelector for Flyteadmin deployment | | flyteadmin.podAnnotations | object | `{}` | Annotations for Flyteadmin pods | @@ -238,7 +238,7 @@ helm install gateway bitnami/contour -n flyte | flytepropeller.extraArgs | object | `{}` | Appends extra command line arguments to the main command | | flytepropeller.image.pullPolicy | string | `"IfNotPresent"` | | | flytepropeller.image.repository | string | `"cr.flyte.org/flyteorg/flytepropeller"` | Docker image for Flytepropeller deployment | -| flytepropeller.image.tag | string | `"v1.13.1-rc0"` | | +| flytepropeller.image.tag | string | `"v1.13.1-rc1"` | | | flytepropeller.manager | bool | `false` | | | flytepropeller.nodeSelector | object | `{}` | nodeSelector for Flytepropeller deployment | | flytepropeller.podAnnotations | object | `{}` | Annotations for Flytepropeller pods | @@ -270,7 +270,7 @@ helm install gateway bitnami/contour -n flyte | flytescheduler.configPath | string | `"/etc/flyte/config/*.yaml"` | Default regex string for searching configuration files | | flytescheduler.image.pullPolicy | string | `"IfNotPresent"` | Docker image pull policy | | flytescheduler.image.repository | string | `"cr.flyte.org/flyteorg/flytescheduler"` | Docker image for 
Flytescheduler deployment | -| flytescheduler.image.tag | string | `"v1.13.1-rc0"` | Docker image tag | +| flytescheduler.image.tag | string | `"v1.13.1-rc1"` | Docker image tag | | flytescheduler.nodeSelector | object | `{}` | nodeSelector for Flytescheduler deployment | | flytescheduler.podAnnotations | object | `{}` | Annotations for Flytescheduler pods | | flytescheduler.podEnv | object | `{}` | Additional Flytescheduler container environment variables | diff --git a/charts/flyte-core/values.yaml b/charts/flyte-core/values.yaml index 2c9e02427e..4462372d95 100755 --- a/charts/flyte-core/values.yaml +++ b/charts/flyte-core/values.yaml @@ -16,7 +16,7 @@ flyteadmin: image: # -- Docker image for Flyteadmin deployment repository: cr.flyte.org/flyteorg/flyteadmin # FLYTEADMIN_IMAGE - tag: v1.13.1-rc0 # FLYTEADMIN_TAG + tag: v1.13.1-rc1 # FLYTEADMIN_TAG pullPolicy: IfNotPresent # -- Additional flyteadmin container environment variables # @@ -144,7 +144,7 @@ flytescheduler: # -- Docker image for Flytescheduler deployment repository: cr.flyte.org/flyteorg/flytescheduler # FLYTESCHEDULER_IMAGE # -- Docker image tag - tag: v1.13.1-rc0 # FLYTESCHEDULER_TAG + tag: v1.13.1-rc1 # FLYTESCHEDULER_TAG # -- Docker image pull policy pullPolicy: IfNotPresent # -- Default resources requests and limits for Flytescheduler deployment @@ -210,7 +210,7 @@ datacatalog: # -- Docker image for Datacatalog deployment repository: cr.flyte.org/flyteorg/datacatalog # DATACATALOG_IMAGE # -- Docker image tag - tag: v1.13.1-rc0 # DATACATALOG_TAG + tag: v1.13.1-rc1 # DATACATALOG_TAG # -- Docker image pull policy pullPolicy: IfNotPresent # -- Default resources requests and limits for Datacatalog deployment @@ -309,7 +309,7 @@ flytepropeller: image: # -- Docker image for Flytepropeller deployment repository: cr.flyte.org/flyteorg/flytepropeller # FLYTEPROPELLER_IMAGE - tag: v1.13.1-rc0 # FLYTEPROPELLER_TAG + tag: v1.13.1-rc1 # FLYTEPROPELLER_TAG pullPolicy: IfNotPresent # -- Default resources requests and limits for Flytepropeller deployment resources: @@ -801,7 +801,7 @@ configmap: # -- Structure documented [here](https://pkg.go.dev/github.com/lyft/flyteplugins@v0.5.28/go/tasks/pluginmachinery/flytek8s/config#FlyteCoPilotConfig) co-pilot: name: flyte-copilot- - image: cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc0 # FLYTECOPILOT_IMAGE + image: cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc1 # FLYTECOPILOT_IMAGE start-timeout: 30s # -- Core propeller configuration diff --git a/charts/flyte/README.md b/charts/flyte/README.md index 321b9e22a3..4a3a911d00 100644 --- a/charts/flyte/README.md +++ b/charts/flyte/README.md @@ -71,7 +71,7 @@ helm upgrade -f values-sandbox.yaml flyte . 
| contour.tolerations | list | `[]` | tolerations for Contour deployment | | daskoperator | object | `{"enabled":false}` | Optional: Dask Plugin using the Dask Operator | | daskoperator.enabled | bool | `false` | - enable or disable the dask operator deployment installation | -| flyte | object | `{"cluster_resource_manager":{"config":{"cluster_resources":{"customData":[{"production":[{"projectQuotaCpu":{"value":"5"}},{"projectQuotaMemory":{"value":"4000Mi"}}]},{"staging":[{"projectQuotaCpu":{"value":"2"}},{"projectQuotaMemory":{"value":"3000Mi"}}]},{"development":[{"projectQuotaCpu":{"value":"4"}},{"projectQuotaMemory":{"value":"3000Mi"}}]}],"refresh":"5m","refreshInterval":"5m","standaloneDeployment":false,"templatePath":"/etc/flyte/clusterresource/templates"}},"enabled":true,"service_account_name":"flyteadmin","templates":[{"key":"aa_namespace","value":"apiVersion: v1\nkind: Namespace\nmetadata:\n name: {{ namespace }}\nspec:\n finalizers:\n - kubernetes\n"},{"key":"ab_project_resource_quota","value":"apiVersion: v1\nkind: ResourceQuota\nmetadata:\n name: project-quota\n namespace: {{ namespace }}\nspec:\n hard:\n limits.cpu: {{ projectQuotaCpu }}\n limits.memory: {{ projectQuotaMemory }}\n"}]},"common":{"databaseSecret":{"name":"","secretManifest":{}},"flyteNamespaceTemplate":{"enabled":false},"ingress":{"albSSLRedirect":false,"annotations":{"nginx.ingress.kubernetes.io/app-root":"/console"},"enabled":true,"host":"","separateGrpcIngress":false,"separateGrpcIngressAnnotations":{"nginx.ingress.kubernetes.io/backend-protocol":"GRPC"},"tls":{"enabled":false},"webpackHMR":true}},"configmap":{"adminServer":{"auth":{"appAuth":{"thirdPartyConfig":{"flyteClient":{"clientId":"flytectl","redirectUri":"http://localhost:53593/callback","scopes":["offline","all"]}}},"authorizedUris":["https://localhost:30081","http://flyteadmin:80","http://flyteadmin.flyte.svc.cluster.local:80"],"userAuth":{"openId":{"baseUrl":"https://accounts.google.com","clientId":"657465813211-6eog7ek7li5k7i7fvgv2921075063hpe.apps.googleusercontent.com","scopes":["profile","openid"]}}},"flyteadmin":{"eventVersion":2,"metadataStoragePrefix":["metadata","admin"],"metricsScope":"flyte:","profilerPort":10254,"roleNameKey":"iam.amazonaws.com/role","testing":{"host":"http://flyteadmin"}},"server":{"grpc":{"port":8089},"httpPort":8088,"security":{"allowCors":true,"allowedHeaders":["Content-Type","flyte-authorization"],"allowedOrigins":["*"],"secure":false,"useAuth":false}}},"catalog":{"catalog-cache":{"endpoint":"datacatalog:89","insecure":true,"type":"datacatalog"}},"console":{"BASE_URL":"/console","CONFIG_DIR":"/etc/flyte/config"},"copilot":{"plugins":{"k8s":{"co-pilot":{"image":"cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc0","name":"flyte-copilot-","start-timeout":"30s"}}}},"core":{"propeller":{"downstream-eval-duration":"30s","enable-admin-launcher":true,"leader-election":{"enabled":true,"lease-duration":"15s","lock-config-map":{"name":"propeller-leader","namespace":"flyte"},"renew-deadline":"10s","retry-period":"2s"},"limit-namespace":"all","max-workflow-retries":30,"metadata-prefix":"metadata/propeller","metrics-prefix":"flyte","prof-port":10254,"queue":{"batch-size":-1,"batching-interval":"2s","queue":{"base-delay":"5s","capacity":1000,"max-delay":"120s","rate":100,"type":"maxof"},"sub-queue":{"capacity":100,"rate":10,"type":"bucket"},"type":"batch"},"rawoutput-prefix":"s3://my-s3-bucket/","workers":4,"workflow-reeval-duration":"30s"},"webhook":{"certDir":"/etc/webhook/certs","serviceName":"flyte-pod-webhook"}},"datacatalogServer
":{"application":{"grpcPort":8089,"grpcServerReflection":true,"httpPort":8080},"datacatalog":{"metrics-scope":"datacatalog","profiler-port":10254,"storage-prefix":"metadata/datacatalog"}},"domain":{"domains":[{"id":"development","name":"development"},{"id":"staging","name":"staging"},{"id":"production","name":"production"}]},"enabled_plugins":{"tasks":{"task-plugins":{"default-for-task-types":{"container":"container","container_array":"k8s-array","sensor":"agent-service","sidecar":"sidecar"},"enabled-plugins":["container","sidecar","k8s-array","agent-service"]}}},"k8s":{"plugins":{"k8s":{"default-cpus":"100m","default-env-from-configmaps":[],"default-env-from-secrets":[],"default-env-vars":[{"FLYTE_AWS_ENDPOINT":"http://minio.flyte:9000"},{"FLYTE_AWS_ACCESS_KEY_ID":"minio"},{"FLYTE_AWS_SECRET_ACCESS_KEY":"miniostorage"}],"default-memory":"200Mi"}}},"logger":{"logger":{"level":5,"show-source":true}},"remoteData":{"remoteData":{"region":"us-east-1","scheme":"local","signedUrls":{"durationMinutes":3}}},"resource_manager":{"propeller":{"resourcemanager":{"redis":null,"type":"noop"}}},"task_logs":{"plugins":{"logs":{"cloudwatch-enabled":false,"kubernetes-enabled":true,"kubernetes-template-uri":"http://localhost:30082/#/log/{{ \"{{\" }} .namespace {{ \"}}\" }}/{{ \"{{\" }} .podName {{ \"}}\" }}/pod?namespace={{ \"{{\" }} .namespace {{ \"}}\" }}"}}},"task_resource_defaults":{"task_resources":{"defaults":{"cpu":"100m","memory":"200Mi","storage":"5Mi"},"limits":{"cpu":2,"gpu":1,"memory":"1Gi","storage":"20Mi"}}}},"datacatalog":{"affinity":{},"configPath":"/etc/datacatalog/config/*.yaml","image":{"pullPolicy":"IfNotPresent","repository":"cr.flyte.org/flyteorg/datacatalog","tag":"v1.13.1-rc0"},"nodeSelector":{},"podAnnotations":{},"replicaCount":1,"resources":{"limits":{"cpu":"500m","ephemeral-storage":"100Mi","memory":"500Mi"},"requests":{"cpu":"10m","ephemeral-storage":"50Mi","memory":"50Mi"}},"service":{"annotations":{"projectcontour.io/upstream-protocol.h2c":"grpc"},"type":"NodePort"},"serviceAccount":{"annotations":{},"create":true,"imagePullSecrets":[]},"tolerations":[]},"db":{"admin":{"database":{"dbname":"flyteadmin","host":"postgres","port":5432,"username":"postgres"}},"datacatalog":{"database":{"dbname":"datacatalog","host":"postgres","port":5432,"username":"postgres"}}},"deployRedoc":true,"flyteadmin":{"additionalVolumeMounts":[],"additionalVolumes":[],"affinity":{},"configPath":"/etc/flyte/config/*.yaml","env":[],"image":{"pullPolicy":"IfNotPresent","repository":"cr.flyte.org/flyteorg/flyteadmin","tag":"v1.13.1-rc0"},"initialProjects":["flytesnacks","flytetester","flyteexamples"],"nodeSelector":{},"podAnnotations":{},"replicaCount":1,"resources":{"limits":{"cpu":"250m","ephemeral-storage":"100Mi","memory":"500Mi"},"requests":{"cpu":"10m","ephemeral-storage":"50Mi","memory":"50Mi"}},"secrets":{},"service":{"annotations":{"projectcontour.io/upstream-protocol.h2c":"grpc"},"loadBalancerSourceRanges":[],"type":"ClusterIP"},"serviceAccount":{"annotations":{},"create":true,"imagePullSecrets":[]},"tolerations":[]},"flyteconsole":{"affinity":{},"ga":{"enabled":true,"tracking_id":"G-0QW4DJWJ20"},"image":{"pullPolicy":"IfNotPresent","repository":"cr.flyte.org/flyteorg/flyteconsole","tag":"v1.17.1"},"nodeSelector":{},"podAnnotations":{},"replicaCount":1,"resources":{"limits":{"cpu":"500m","memory":"275Mi"},"requests":{"cpu":"10m","memory":"250Mi"}},"service":{"annotations":{},"type":"ClusterIP"},"tolerations":[]},"flytepropeller":{"affinity":{},"cacheSizeMbs":0,"configPath":"/etc/flyte/config/*.yaml",
"image":{"pullPolicy":"IfNotPresent","repository":"cr.flyte.org/flyteorg/flytepropeller","tag":"v1.13.1-rc0"},"manager":false,"nodeSelector":{},"podAnnotations":{},"replicaCount":1,"resources":{"limits":{"cpu":"200m","ephemeral-storage":"100Mi","memory":"200Mi"},"requests":{"cpu":"10m","ephemeral-storage":"50Mi","memory":"50Mi"}},"serviceAccount":{"annotations":{},"create":true,"imagePullSecrets":[]},"tolerations":[]},"flytescheduler":{"affinity":{},"configPath":"/etc/flyte/config/*.yaml","image":{"pullPolicy":"IfNotPresent","repository":"cr.flyte.org/flyteorg/flytescheduler","tag":"v1.13.1-rc0"},"nodeSelector":{},"podAnnotations":{},"resources":{"limits":{"cpu":"250m","ephemeral-storage":"100Mi","memory":"500Mi"},"requests":{"cpu":"10m","ephemeral-storage":"50Mi","memory":"50Mi"}},"secrets":{},"serviceAccount":{"annotations":{},"create":true,"imagePullSecrets":[]},"tolerations":[]},"storage":{"bucketName":"my-s3-bucket","custom":{},"gcs":null,"s3":{"region":"us-east-1"},"type":"sandbox"},"webhook":{"enabled":true,"service":{"annotations":{"projectcontour.io/upstream-protocol.h2c":"grpc"},"type":"ClusterIP"},"serviceAccount":{"annotations":{},"create":true,"imagePullSecrets":[]}},"workflow_notifications":{"config":{},"enabled":false},"workflow_scheduler":{"enabled":true,"type":"native"}}` | ------------------------------------------------------------------- Core System settings This section consists of Core components of Flyte and their deployment settings. This includes FlyteAdmin service, Datacatalog, FlytePropeller and Flyteconsole | +| flyte | object | `{"cluster_resource_manager":{"config":{"cluster_resources":{"customData":[{"production":[{"projectQuotaCpu":{"value":"5"}},{"projectQuotaMemory":{"value":"4000Mi"}}]},{"staging":[{"projectQuotaCpu":{"value":"2"}},{"projectQuotaMemory":{"value":"3000Mi"}}]},{"development":[{"projectQuotaCpu":{"value":"4"}},{"projectQuotaMemory":{"value":"3000Mi"}}]}],"refresh":"5m","refreshInterval":"5m","standaloneDeployment":false,"templatePath":"/etc/flyte/clusterresource/templates"}},"enabled":true,"service_account_name":"flyteadmin","templates":[{"key":"aa_namespace","value":"apiVersion: v1\nkind: Namespace\nmetadata:\n name: {{ namespace }}\nspec:\n finalizers:\n - kubernetes\n"},{"key":"ab_project_resource_quota","value":"apiVersion: v1\nkind: ResourceQuota\nmetadata:\n name: project-quota\n namespace: {{ namespace }}\nspec:\n hard:\n limits.cpu: {{ projectQuotaCpu }}\n limits.memory: {{ projectQuotaMemory 
}}\n"}]},"common":{"databaseSecret":{"name":"","secretManifest":{}},"flyteNamespaceTemplate":{"enabled":false},"ingress":{"albSSLRedirect":false,"annotations":{"nginx.ingress.kubernetes.io/app-root":"/console"},"enabled":true,"host":"","separateGrpcIngress":false,"separateGrpcIngressAnnotations":{"nginx.ingress.kubernetes.io/backend-protocol":"GRPC"},"tls":{"enabled":false},"webpackHMR":true}},"configmap":{"adminServer":{"auth":{"appAuth":{"thirdPartyConfig":{"flyteClient":{"clientId":"flytectl","redirectUri":"http://localhost:53593/callback","scopes":["offline","all"]}}},"authorizedUris":["https://localhost:30081","http://flyteadmin:80","http://flyteadmin.flyte.svc.cluster.local:80"],"userAuth":{"openId":{"baseUrl":"https://accounts.google.com","clientId":"657465813211-6eog7ek7li5k7i7fvgv2921075063hpe.apps.googleusercontent.com","scopes":["profile","openid"]}}},"flyteadmin":{"eventVersion":2,"metadataStoragePrefix":["metadata","admin"],"metricsScope":"flyte:","profilerPort":10254,"roleNameKey":"iam.amazonaws.com/role","testing":{"host":"http://flyteadmin"}},"server":{"grpc":{"port":8089},"httpPort":8088,"security":{"allowCors":true,"allowedHeaders":["Content-Type","flyte-authorization"],"allowedOrigins":["*"],"secure":false,"useAuth":false}}},"catalog":{"catalog-cache":{"endpoint":"datacatalog:89","insecure":true,"type":"datacatalog"}},"console":{"BASE_URL":"/console","CONFIG_DIR":"/etc/flyte/config"},"copilot":{"plugins":{"k8s":{"co-pilot":{"image":"cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc1","name":"flyte-copilot-","start-timeout":"30s"}}}},"core":{"propeller":{"downstream-eval-duration":"30s","enable-admin-launcher":true,"leader-election":{"enabled":true,"lease-duration":"15s","lock-config-map":{"name":"propeller-leader","namespace":"flyte"},"renew-deadline":"10s","retry-period":"2s"},"limit-namespace":"all","max-workflow-retries":30,"metadata-prefix":"metadata/propeller","metrics-prefix":"flyte","prof-port":10254,"queue":{"batch-size":-1,"batching-interval":"2s","queue":{"base-delay":"5s","capacity":1000,"max-delay":"120s","rate":100,"type":"maxof"},"sub-queue":{"capacity":100,"rate":10,"type":"bucket"},"type":"batch"},"rawoutput-prefix":"s3://my-s3-bucket/","workers":4,"workflow-reeval-duration":"30s"},"webhook":{"certDir":"/etc/webhook/certs","serviceName":"flyte-pod-webhook"}},"datacatalogServer":{"application":{"grpcPort":8089,"grpcServerReflection":true,"httpPort":8080},"datacatalog":{"metrics-scope":"datacatalog","profiler-port":10254,"storage-prefix":"metadata/datacatalog"}},"domain":{"domains":[{"id":"development","name":"development"},{"id":"staging","name":"staging"},{"id":"production","name":"production"}]},"enabled_plugins":{"tasks":{"task-plugins":{"default-for-task-types":{"container":"container","container_array":"k8s-array","sensor":"agent-service","sidecar":"sidecar"},"enabled-plugins":["container","sidecar","k8s-array","agent-service"]}}},"k8s":{"plugins":{"k8s":{"default-cpus":"100m","default-env-from-configmaps":[],"default-env-from-secrets":[],"default-env-vars":[{"FLYTE_AWS_ENDPOINT":"http://minio.flyte:9000"},{"FLYTE_AWS_ACCESS_KEY_ID":"minio"},{"FLYTE_AWS_SECRET_ACCESS_KEY":"miniostorage"}],"default-memory":"200Mi"}}},"logger":{"logger":{"level":5,"show-source":true}},"remoteData":{"remoteData":{"region":"us-east-1","scheme":"local","signedUrls":{"durationMinutes":3}}},"resource_manager":{"propeller":{"resourcemanager":{"redis":null,"type":"noop"}}},"task_logs":{"plugins":{"logs":{"cloudwatch-enabled":false,"kubernetes-enabled":true,"kubernetes-template-uri"
:"http://localhost:30082/#/log/{{ \"{{\" }} .namespace {{ \"}}\" }}/{{ \"{{\" }} .podName {{ \"}}\" }}/pod?namespace={{ \"{{\" }} .namespace {{ \"}}\" }}"}}},"task_resource_defaults":{"task_resources":{"defaults":{"cpu":"100m","memory":"200Mi","storage":"5Mi"},"limits":{"cpu":2,"gpu":1,"memory":"1Gi","storage":"20Mi"}}}},"datacatalog":{"affinity":{},"configPath":"/etc/datacatalog/config/*.yaml","image":{"pullPolicy":"IfNotPresent","repository":"cr.flyte.org/flyteorg/datacatalog","tag":"v1.13.1-rc1"},"nodeSelector":{},"podAnnotations":{},"replicaCount":1,"resources":{"limits":{"cpu":"500m","ephemeral-storage":"100Mi","memory":"500Mi"},"requests":{"cpu":"10m","ephemeral-storage":"50Mi","memory":"50Mi"}},"service":{"annotations":{"projectcontour.io/upstream-protocol.h2c":"grpc"},"type":"NodePort"},"serviceAccount":{"annotations":{},"create":true,"imagePullSecrets":[]},"tolerations":[]},"db":{"admin":{"database":{"dbname":"flyteadmin","host":"postgres","port":5432,"username":"postgres"}},"datacatalog":{"database":{"dbname":"datacatalog","host":"postgres","port":5432,"username":"postgres"}}},"deployRedoc":true,"flyteadmin":{"additionalVolumeMounts":[],"additionalVolumes":[],"affinity":{},"configPath":"/etc/flyte/config/*.yaml","env":[],"image":{"pullPolicy":"IfNotPresent","repository":"cr.flyte.org/flyteorg/flyteadmin","tag":"v1.13.1-rc1"},"initialProjects":["flytesnacks","flytetester","flyteexamples"],"nodeSelector":{},"podAnnotations":{},"replicaCount":1,"resources":{"limits":{"cpu":"250m","ephemeral-storage":"100Mi","memory":"500Mi"},"requests":{"cpu":"10m","ephemeral-storage":"50Mi","memory":"50Mi"}},"secrets":{},"service":{"annotations":{"projectcontour.io/upstream-protocol.h2c":"grpc"},"loadBalancerSourceRanges":[],"type":"ClusterIP"},"serviceAccount":{"annotations":{},"create":true,"imagePullSecrets":[]},"tolerations":[]},"flyteconsole":{"affinity":{},"ga":{"enabled":true,"tracking_id":"G-0QW4DJWJ20"},"image":{"pullPolicy":"IfNotPresent","repository":"cr.flyte.org/flyteorg/flyteconsole","tag":"v1.17.1"},"nodeSelector":{},"podAnnotations":{},"replicaCount":1,"resources":{"limits":{"cpu":"500m","memory":"275Mi"},"requests":{"cpu":"10m","memory":"250Mi"}},"service":{"annotations":{},"type":"ClusterIP"},"tolerations":[]},"flytepropeller":{"affinity":{},"cacheSizeMbs":0,"configPath":"/etc/flyte/config/*.yaml","image":{"pullPolicy":"IfNotPresent","repository":"cr.flyte.org/flyteorg/flytepropeller","tag":"v1.13.1-rc1"},"manager":false,"nodeSelector":{},"podAnnotations":{},"replicaCount":1,"resources":{"limits":{"cpu":"200m","ephemeral-storage":"100Mi","memory":"200Mi"},"requests":{"cpu":"10m","ephemeral-storage":"50Mi","memory":"50Mi"}},"serviceAccount":{"annotations":{},"create":true,"imagePullSecrets":[]},"tolerations":[]},"flytescheduler":{"affinity":{},"configPath":"/etc/flyte/config/*.yaml","image":{"pullPolicy":"IfNotPresent","repository":"cr.flyte.org/flyteorg/flytescheduler","tag":"v1.13.1-rc1"},"nodeSelector":{},"podAnnotations":{},"resources":{"limits":{"cpu":"250m","ephemeral-storage":"100Mi","memory":"500Mi"},"requests":{"cpu":"10m","ephemeral-storage":"50Mi","memory":"50Mi"}},"secrets":{},"serviceAccount":{"annotations":{},"create":true,"imagePullSecrets":[]},"tolerations":[]},"storage":{"bucketName":"my-s3-bucket","custom":{},"gcs":null,"s3":{"region":"us-east-1"},"type":"sandbox"},"webhook":{"enabled":true,"service":{"annotations":{"projectcontour.io/upstream-protocol.h2c":"grpc"},"type":"ClusterIP"},"serviceAccount":{"annotations":{},"create":true,"imagePullSecrets":[]}},"workflo
w_notifications":{"config":{},"enabled":false},"workflow_scheduler":{"enabled":true,"type":"native"}}` | ------------------------------------------------------------------- Core System settings This section consists of Core components of Flyte and their deployment settings. This includes FlyteAdmin service, Datacatalog, FlytePropeller and Flyteconsole | | flyte.cluster_resource_manager | object | `{"config":{"cluster_resources":{"customData":[{"production":[{"projectQuotaCpu":{"value":"5"}},{"projectQuotaMemory":{"value":"4000Mi"}}]},{"staging":[{"projectQuotaCpu":{"value":"2"}},{"projectQuotaMemory":{"value":"3000Mi"}}]},{"development":[{"projectQuotaCpu":{"value":"4"}},{"projectQuotaMemory":{"value":"3000Mi"}}]}],"refresh":"5m","refreshInterval":"5m","standaloneDeployment":false,"templatePath":"/etc/flyte/clusterresource/templates"}},"enabled":true,"service_account_name":"flyteadmin","templates":[{"key":"aa_namespace","value":"apiVersion: v1\nkind: Namespace\nmetadata:\n name: {{ namespace }}\nspec:\n finalizers:\n - kubernetes\n"},{"key":"ab_project_resource_quota","value":"apiVersion: v1\nkind: ResourceQuota\nmetadata:\n name: project-quota\n namespace: {{ namespace }}\nspec:\n hard:\n limits.cpu: {{ projectQuotaCpu }}\n limits.memory: {{ projectQuotaMemory }}\n"}]}` | Configuration for the Cluster resource manager component. This is an optional component, that enables automatic cluster configuration. This is useful to set default quotas, manage namespaces etc that map to a project/domain | | flyte.cluster_resource_manager.config.cluster_resources | object | `{"customData":[{"production":[{"projectQuotaCpu":{"value":"5"}},{"projectQuotaMemory":{"value":"4000Mi"}}]},{"staging":[{"projectQuotaCpu":{"value":"2"}},{"projectQuotaMemory":{"value":"3000Mi"}}]},{"development":[{"projectQuotaCpu":{"value":"4"}},{"projectQuotaMemory":{"value":"3000Mi"}}]}],"refresh":"5m","refreshInterval":"5m","standaloneDeployment":false,"templatePath":"/etc/flyte/clusterresource/templates"}` | ClusterResource parameters Refer to the [structure](https://pkg.go.dev/github.com/lyft/flyteadmin@v0.3.37/pkg/runtime/interfaces#ClusterResourceConfig) to customize. | | flyte.cluster_resource_manager.config.cluster_resources.standaloneDeployment | bool | `false` | Starts the cluster resource manager in standalone mode with requisite auth credentials to call flyteadmin service endpoints | @@ -91,15 +91,15 @@ helm upgrade -f values-sandbox.yaml flyte . | flyte.common.ingress.separateGrpcIngressAnnotations | object | `{"nginx.ingress.kubernetes.io/backend-protocol":"GRPC"}` | - Extra Ingress annotations applied only to the GRPC ingress. Only makes sense if `separateGrpcIngress` is enabled. | | flyte.common.ingress.tls | object | `{"enabled":false}` | - TLS Settings | | flyte.common.ingress.webpackHMR | bool | `true` | - Enable or disable HMR route to flyteconsole. This is useful only for frontend development. 
| -| flyte.configmap | object | `{"adminServer":{"auth":{"appAuth":{"thirdPartyConfig":{"flyteClient":{"clientId":"flytectl","redirectUri":"http://localhost:53593/callback","scopes":["offline","all"]}}},"authorizedUris":["https://localhost:30081","http://flyteadmin:80","http://flyteadmin.flyte.svc.cluster.local:80"],"userAuth":{"openId":{"baseUrl":"https://accounts.google.com","clientId":"657465813211-6eog7ek7li5k7i7fvgv2921075063hpe.apps.googleusercontent.com","scopes":["profile","openid"]}}},"flyteadmin":{"eventVersion":2,"metadataStoragePrefix":["metadata","admin"],"metricsScope":"flyte:","profilerPort":10254,"roleNameKey":"iam.amazonaws.com/role","testing":{"host":"http://flyteadmin"}},"server":{"grpc":{"port":8089},"httpPort":8088,"security":{"allowCors":true,"allowedHeaders":["Content-Type","flyte-authorization"],"allowedOrigins":["*"],"secure":false,"useAuth":false}}},"catalog":{"catalog-cache":{"endpoint":"datacatalog:89","insecure":true,"type":"datacatalog"}},"console":{"BASE_URL":"/console","CONFIG_DIR":"/etc/flyte/config"},"copilot":{"plugins":{"k8s":{"co-pilot":{"image":"cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc0","name":"flyte-copilot-","start-timeout":"30s"}}}},"core":{"propeller":{"downstream-eval-duration":"30s","enable-admin-launcher":true,"leader-election":{"enabled":true,"lease-duration":"15s","lock-config-map":{"name":"propeller-leader","namespace":"flyte"},"renew-deadline":"10s","retry-period":"2s"},"limit-namespace":"all","max-workflow-retries":30,"metadata-prefix":"metadata/propeller","metrics-prefix":"flyte","prof-port":10254,"queue":{"batch-size":-1,"batching-interval":"2s","queue":{"base-delay":"5s","capacity":1000,"max-delay":"120s","rate":100,"type":"maxof"},"sub-queue":{"capacity":100,"rate":10,"type":"bucket"},"type":"batch"},"rawoutput-prefix":"s3://my-s3-bucket/","workers":4,"workflow-reeval-duration":"30s"},"webhook":{"certDir":"/etc/webhook/certs","serviceName":"flyte-pod-webhook"}},"datacatalogServer":{"application":{"grpcPort":8089,"grpcServerReflection":true,"httpPort":8080},"datacatalog":{"metrics-scope":"datacatalog","profiler-port":10254,"storage-prefix":"metadata/datacatalog"}},"domain":{"domains":[{"id":"development","name":"development"},{"id":"staging","name":"staging"},{"id":"production","name":"production"}]},"enabled_plugins":{"tasks":{"task-plugins":{"default-for-task-types":{"container":"container","container_array":"k8s-array","sensor":"agent-service","sidecar":"sidecar"},"enabled-plugins":["container","sidecar","k8s-array","agent-service"]}}},"k8s":{"plugins":{"k8s":{"default-cpus":"100m","default-env-from-configmaps":[],"default-env-from-secrets":[],"default-env-vars":[{"FLYTE_AWS_ENDPOINT":"http://minio.flyte:9000"},{"FLYTE_AWS_ACCESS_KEY_ID":"minio"},{"FLYTE_AWS_SECRET_ACCESS_KEY":"miniostorage"}],"default-memory":"200Mi"}}},"logger":{"logger":{"level":5,"show-source":true}},"remoteData":{"remoteData":{"region":"us-east-1","scheme":"local","signedUrls":{"durationMinutes":3}}},"resource_manager":{"propeller":{"resourcemanager":{"redis":null,"type":"noop"}}},"task_logs":{"plugins":{"logs":{"cloudwatch-enabled":false,"kubernetes-enabled":true,"kubernetes-template-uri":"http://localhost:30082/#/log/{{ \"{{\" }} .namespace {{ \"}}\" }}/{{ \"{{\" }} .podName {{ \"}}\" }}/pod?namespace={{ \"{{\" }} .namespace {{ \"}}\" }}"}}},"task_resource_defaults":{"task_resources":{"defaults":{"cpu":"100m","memory":"200Mi","storage":"5Mi"},"limits":{"cpu":2,"gpu":1,"memory":"1Gi","storage":"20Mi"}}}}` | 
----------------------------------------------------------------- CONFIGMAPS SETTINGS | +| flyte.configmap | object | `{"adminServer":{"auth":{"appAuth":{"thirdPartyConfig":{"flyteClient":{"clientId":"flytectl","redirectUri":"http://localhost:53593/callback","scopes":["offline","all"]}}},"authorizedUris":["https://localhost:30081","http://flyteadmin:80","http://flyteadmin.flyte.svc.cluster.local:80"],"userAuth":{"openId":{"baseUrl":"https://accounts.google.com","clientId":"657465813211-6eog7ek7li5k7i7fvgv2921075063hpe.apps.googleusercontent.com","scopes":["profile","openid"]}}},"flyteadmin":{"eventVersion":2,"metadataStoragePrefix":["metadata","admin"],"metricsScope":"flyte:","profilerPort":10254,"roleNameKey":"iam.amazonaws.com/role","testing":{"host":"http://flyteadmin"}},"server":{"grpc":{"port":8089},"httpPort":8088,"security":{"allowCors":true,"allowedHeaders":["Content-Type","flyte-authorization"],"allowedOrigins":["*"],"secure":false,"useAuth":false}}},"catalog":{"catalog-cache":{"endpoint":"datacatalog:89","insecure":true,"type":"datacatalog"}},"console":{"BASE_URL":"/console","CONFIG_DIR":"/etc/flyte/config"},"copilot":{"plugins":{"k8s":{"co-pilot":{"image":"cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc1","name":"flyte-copilot-","start-timeout":"30s"}}}},"core":{"propeller":{"downstream-eval-duration":"30s","enable-admin-launcher":true,"leader-election":{"enabled":true,"lease-duration":"15s","lock-config-map":{"name":"propeller-leader","namespace":"flyte"},"renew-deadline":"10s","retry-period":"2s"},"limit-namespace":"all","max-workflow-retries":30,"metadata-prefix":"metadata/propeller","metrics-prefix":"flyte","prof-port":10254,"queue":{"batch-size":-1,"batching-interval":"2s","queue":{"base-delay":"5s","capacity":1000,"max-delay":"120s","rate":100,"type":"maxof"},"sub-queue":{"capacity":100,"rate":10,"type":"bucket"},"type":"batch"},"rawoutput-prefix":"s3://my-s3-bucket/","workers":4,"workflow-reeval-duration":"30s"},"webhook":{"certDir":"/etc/webhook/certs","serviceName":"flyte-pod-webhook"}},"datacatalogServer":{"application":{"grpcPort":8089,"grpcServerReflection":true,"httpPort":8080},"datacatalog":{"metrics-scope":"datacatalog","profiler-port":10254,"storage-prefix":"metadata/datacatalog"}},"domain":{"domains":[{"id":"development","name":"development"},{"id":"staging","name":"staging"},{"id":"production","name":"production"}]},"enabled_plugins":{"tasks":{"task-plugins":{"default-for-task-types":{"container":"container","container_array":"k8s-array","sensor":"agent-service","sidecar":"sidecar"},"enabled-plugins":["container","sidecar","k8s-array","agent-service"]}}},"k8s":{"plugins":{"k8s":{"default-cpus":"100m","default-env-from-configmaps":[],"default-env-from-secrets":[],"default-env-vars":[{"FLYTE_AWS_ENDPOINT":"http://minio.flyte:9000"},{"FLYTE_AWS_ACCESS_KEY_ID":"minio"},{"FLYTE_AWS_SECRET_ACCESS_KEY":"miniostorage"}],"default-memory":"200Mi"}}},"logger":{"logger":{"level":5,"show-source":true}},"remoteData":{"remoteData":{"region":"us-east-1","scheme":"local","signedUrls":{"durationMinutes":3}}},"resource_manager":{"propeller":{"resourcemanager":{"redis":null,"type":"noop"}}},"task_logs":{"plugins":{"logs":{"cloudwatch-enabled":false,"kubernetes-enabled":true,"kubernetes-template-uri":"http://localhost:30082/#/log/{{ \"{{\" }} .namespace {{ \"}}\" }}/{{ \"{{\" }} .podName {{ \"}}\" }}/pod?namespace={{ \"{{\" }} .namespace {{ \"}}\" 
}}"}}},"task_resource_defaults":{"task_resources":{"defaults":{"cpu":"100m","memory":"200Mi","storage":"5Mi"},"limits":{"cpu":2,"gpu":1,"memory":"1Gi","storage":"20Mi"}}}}` | ----------------------------------------------------------------- CONFIGMAPS SETTINGS | | flyte.configmap.adminServer | object | `{"auth":{"appAuth":{"thirdPartyConfig":{"flyteClient":{"clientId":"flytectl","redirectUri":"http://localhost:53593/callback","scopes":["offline","all"]}}},"authorizedUris":["https://localhost:30081","http://flyteadmin:80","http://flyteadmin.flyte.svc.cluster.local:80"],"userAuth":{"openId":{"baseUrl":"https://accounts.google.com","clientId":"657465813211-6eog7ek7li5k7i7fvgv2921075063hpe.apps.googleusercontent.com","scopes":["profile","openid"]}}},"flyteadmin":{"eventVersion":2,"metadataStoragePrefix":["metadata","admin"],"metricsScope":"flyte:","profilerPort":10254,"roleNameKey":"iam.amazonaws.com/role","testing":{"host":"http://flyteadmin"}},"server":{"grpc":{"port":8089},"httpPort":8088,"security":{"allowCors":true,"allowedHeaders":["Content-Type","flyte-authorization"],"allowedOrigins":["*"],"secure":false,"useAuth":false}}}` | FlyteAdmin server configuration | | flyte.configmap.adminServer.auth | object | `{"appAuth":{"thirdPartyConfig":{"flyteClient":{"clientId":"flytectl","redirectUri":"http://localhost:53593/callback","scopes":["offline","all"]}}},"authorizedUris":["https://localhost:30081","http://flyteadmin:80","http://flyteadmin.flyte.svc.cluster.local:80"],"userAuth":{"openId":{"baseUrl":"https://accounts.google.com","clientId":"657465813211-6eog7ek7li5k7i7fvgv2921075063hpe.apps.googleusercontent.com","scopes":["profile","openid"]}}}` | Authentication configuration | | flyte.configmap.adminServer.server.security.secure | bool | `false` | Controls whether to serve requests over SSL/TLS. | | flyte.configmap.adminServer.server.security.useAuth | bool | `false` | Controls whether to enforce authentication. Follow the guide in https://docs.flyte.org/ on how to setup authentication. 
| | flyte.configmap.catalog | object | `{"catalog-cache":{"endpoint":"datacatalog:89","insecure":true,"type":"datacatalog"}}` | Catalog Client configuration [structure](https://pkg.go.dev/github.com/flyteorg/flytepropeller/pkg/controller/nodes/task/catalog#Config) Additional advanced Catalog configuration [here](https://pkg.go.dev/github.com/lyft/flyteplugins/go/tasks/pluginmachinery/catalog#Config) | | flyte.configmap.console | object | `{"BASE_URL":"/console","CONFIG_DIR":"/etc/flyte/config"}` | Configuration for Flyte console UI | -| flyte.configmap.copilot | object | `{"plugins":{"k8s":{"co-pilot":{"image":"cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc0","name":"flyte-copilot-","start-timeout":"30s"}}}}` | Copilot configuration | -| flyte.configmap.copilot.plugins.k8s.co-pilot | object | `{"image":"cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc0","name":"flyte-copilot-","start-timeout":"30s"}` | Structure documented [here](https://pkg.go.dev/github.com/lyft/flyteplugins@v0.5.28/go/tasks/pluginmachinery/flytek8s/config#FlyteCoPilotConfig) | +| flyte.configmap.copilot | object | `{"plugins":{"k8s":{"co-pilot":{"image":"cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc1","name":"flyte-copilot-","start-timeout":"30s"}}}}` | Copilot configuration | +| flyte.configmap.copilot.plugins.k8s.co-pilot | object | `{"image":"cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc1","name":"flyte-copilot-","start-timeout":"30s"}` | Structure documented [here](https://pkg.go.dev/github.com/lyft/flyteplugins@v0.5.28/go/tasks/pluginmachinery/flytek8s/config#FlyteCoPilotConfig) | | flyte.configmap.core | object | `{"propeller":{"downstream-eval-duration":"30s","enable-admin-launcher":true,"leader-election":{"enabled":true,"lease-duration":"15s","lock-config-map":{"name":"propeller-leader","namespace":"flyte"},"renew-deadline":"10s","retry-period":"2s"},"limit-namespace":"all","max-workflow-retries":30,"metadata-prefix":"metadata/propeller","metrics-prefix":"flyte","prof-port":10254,"queue":{"batch-size":-1,"batching-interval":"2s","queue":{"base-delay":"5s","capacity":1000,"max-delay":"120s","rate":100,"type":"maxof"},"sub-queue":{"capacity":100,"rate":10,"type":"bucket"},"type":"batch"},"rawoutput-prefix":"s3://my-s3-bucket/","workers":4,"workflow-reeval-duration":"30s"},"webhook":{"certDir":"/etc/webhook/certs","serviceName":"flyte-pod-webhook"}}` | Core propeller configuration | | flyte.configmap.core.propeller | object | `{"downstream-eval-duration":"30s","enable-admin-launcher":true,"leader-election":{"enabled":true,"lease-duration":"15s","lock-config-map":{"name":"propeller-leader","namespace":"flyte"},"renew-deadline":"10s","retry-period":"2s"},"limit-namespace":"all","max-workflow-retries":30,"metadata-prefix":"metadata/propeller","metrics-prefix":"flyte","prof-port":10254,"queue":{"batch-size":-1,"batching-interval":"2s","queue":{"base-delay":"5s","capacity":1000,"max-delay":"120s","rate":100,"type":"maxof"},"sub-queue":{"capacity":100,"rate":10,"type":"bucket"},"type":"batch"},"rawoutput-prefix":"s3://my-s3-bucket/","workers":4,"workflow-reeval-duration":"30s"}` | follows the structure specified [here](https://pkg.go.dev/github.com/flyteorg/flytepropeller/pkg/controller/config). | | flyte.configmap.datacatalogServer | object | `{"application":{"grpcPort":8089,"grpcServerReflection":true,"httpPort":8080},"datacatalog":{"metrics-scope":"datacatalog","profiler-port":10254,"storage-prefix":"metadata/datacatalog"}}` | Datacatalog server config | @@ -120,7 +120,7 @@ helm upgrade -f values-sandbox.yaml flyte . 
| flyte.datacatalog.configPath | string | `"/etc/datacatalog/config/*.yaml"` | Default regex string for searching configuration files | | flyte.datacatalog.image.pullPolicy | string | `"IfNotPresent"` | Docker image pull policy | | flyte.datacatalog.image.repository | string | `"cr.flyte.org/flyteorg/datacatalog"` | Docker image for Datacatalog deployment | -| flyte.datacatalog.image.tag | string | `"v1.13.1-rc0"` | Docker image tag | +| flyte.datacatalog.image.tag | string | `"v1.13.1-rc1"` | Docker image tag | | flyte.datacatalog.nodeSelector | object | `{}` | nodeSelector for Datacatalog deployment | | flyte.datacatalog.podAnnotations | object | `{}` | Annotations for Datacatalog pods | | flyte.datacatalog.replicaCount | int | `1` | Replicas count for Datacatalog deployment | @@ -136,7 +136,7 @@ helm upgrade -f values-sandbox.yaml flyte . | flyte.flyteadmin.env | list | `[]` | Additional flyteadmin container environment variables e.g. SendGrid's API key - name: SENDGRID_API_KEY value: "" e.g. secret environment variable (you can combine it with .additionalVolumes): - name: SENDGRID_API_KEY valueFrom: secretKeyRef: name: sendgrid-secret key: api_key | | flyte.flyteadmin.image.pullPolicy | string | `"IfNotPresent"` | Docker image pull policy | | flyte.flyteadmin.image.repository | string | `"cr.flyte.org/flyteorg/flyteadmin"` | Docker image for Flyteadmin deployment | -| flyte.flyteadmin.image.tag | string | `"v1.13.1-rc0"` | Docker image tag | +| flyte.flyteadmin.image.tag | string | `"v1.13.1-rc1"` | Docker image tag | | flyte.flyteadmin.initialProjects | list | `["flytesnacks","flytetester","flyteexamples"]` | Initial projects to create | | flyte.flyteadmin.nodeSelector | object | `{}` | nodeSelector for Flyteadmin deployment | | flyte.flyteadmin.podAnnotations | object | `{}` | Annotations for Flyteadmin pods | @@ -162,7 +162,7 @@ helm upgrade -f values-sandbox.yaml flyte . | flyte.flytepropeller.configPath | string | `"/etc/flyte/config/*.yaml"` | Default regex string for searching configuration files | | flyte.flytepropeller.image.pullPolicy | string | `"IfNotPresent"` | Docker image pull policy | | flyte.flytepropeller.image.repository | string | `"cr.flyte.org/flyteorg/flytepropeller"` | Docker image for Flytepropeller deployment | -| flyte.flytepropeller.image.tag | string | `"v1.13.1-rc0"` | Docker image tag | +| flyte.flytepropeller.image.tag | string | `"v1.13.1-rc1"` | Docker image tag | | flyte.flytepropeller.nodeSelector | object | `{}` | nodeSelector for Flytepropeller deployment | | flyte.flytepropeller.podAnnotations | object | `{}` | Annotations for Flytepropeller pods | | flyte.flytepropeller.replicaCount | int | `1` | Replicas count for Flytepropeller deployment | @@ -176,7 +176,7 @@ helm upgrade -f values-sandbox.yaml flyte . 
| flyte.flytescheduler.configPath | string | `"/etc/flyte/config/*.yaml"` | Default regex string for searching configuration files | | flyte.flytescheduler.image.pullPolicy | string | `"IfNotPresent"` | Docker image pull policy | | flyte.flytescheduler.image.repository | string | `"cr.flyte.org/flyteorg/flytescheduler"` | Docker image for Flytescheduler deployment | -| flyte.flytescheduler.image.tag | string | `"v1.13.1-rc0"` | Docker image tag | +| flyte.flytescheduler.image.tag | string | `"v1.13.1-rc1"` | Docker image tag | | flyte.flytescheduler.nodeSelector | object | `{}` | nodeSelector for Flytescheduler deployment | | flyte.flytescheduler.podAnnotations | object | `{}` | Annotations for Flytescheduler pods | | flyte.flytescheduler.resources | object | `{"limits":{"cpu":"250m","ephemeral-storage":"100Mi","memory":"500Mi"},"requests":{"cpu":"10m","ephemeral-storage":"50Mi","memory":"50Mi"}}` | Default resources requests and limits for Flytescheduler deployment | diff --git a/charts/flyte/values.yaml b/charts/flyte/values.yaml index acd3df9050..6444f2d334 100755 --- a/charts/flyte/values.yaml +++ b/charts/flyte/values.yaml @@ -15,7 +15,7 @@ flyte: # -- Docker image for Flyteadmin deployment repository: cr.flyte.org/flyteorg/flyteadmin # FLYTEADMIN_IMAGE # -- Docker image tag - tag: v1.13.1-rc0 # FLYTEADMIN_TAG + tag: v1.13.1-rc1 # FLYTEADMIN_TAG # -- Docker image pull policy pullPolicy: IfNotPresent # -- Additional flyteadmin container environment variables @@ -83,7 +83,7 @@ flyte: # -- Docker image for Flytescheduler deployment repository: cr.flyte.org/flyteorg/flytescheduler # FLYTESCHEDULER_IMAGE # -- Docker image tag - tag: v1.13.1-rc0 # FLYTESCHEDULER_TAG + tag: v1.13.1-rc1 # FLYTESCHEDULER_TAG # -- Docker image pull policy pullPolicy: IfNotPresent # -- Default resources requests and limits for Flytescheduler deployment @@ -128,7 +128,7 @@ flyte: # -- Docker image for Datacatalog deployment repository: cr.flyte.org/flyteorg/datacatalog # DATACATALOG_IMAGE # -- Docker image tag - tag: v1.13.1-rc0 # DATACATALOG_TAG + tag: v1.13.1-rc1 # DATACATALOG_TAG # -- Docker image pull policy pullPolicy: IfNotPresent # -- Default resources requests and limits for Datacatalog deployment @@ -177,7 +177,7 @@ flyte: # -- Docker image for Flytepropeller deployment repository: cr.flyte.org/flyteorg/flytepropeller # FLYTEPROPELLER_IMAGE # -- Docker image tag - tag: v1.13.1-rc0 # FLYTEPROPELLER_TAG + tag: v1.13.1-rc1 # FLYTEPROPELLER_TAG # -- Docker image pull policy pullPolicy: IfNotPresent # -- Default resources requests and limits for Flytepropeller deployment @@ -471,7 +471,7 @@ flyte: # -- Structure documented [here](https://pkg.go.dev/github.com/lyft/flyteplugins@v0.5.28/go/tasks/pluginmachinery/flytek8s/config#FlyteCoPilotConfig) co-pilot: name: flyte-copilot- - image: cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc0 # FLYTECOPILOT_IMAGE + image: cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc1 # FLYTECOPILOT_IMAGE start-timeout: 30s # -- Core propeller configuration diff --git a/charts/flyteagent/README.md b/charts/flyteagent/README.md index e6851b5758..6bd7b056c4 100644 --- a/charts/flyteagent/README.md +++ b/charts/flyteagent/README.md @@ -20,7 +20,7 @@ A Helm chart for Flyte agent | fullnameOverride | string | `""` | | | image.pullPolicy | string | `"IfNotPresent"` | Docker image pull policy | | image.repository | string | `"cr.flyte.org/flyteorg/flyteagent"` | Docker image for flyteagent deployment | -| image.tag | string | `"1.13.3"` | Docker image tag | +| image.tag | string | `"1.13.4"` | 
Docker image tag | | nameOverride | string | `""` | | | nodeSelector | object | `{}` | nodeSelector for flyteagent deployment | | podAnnotations | object | `{}` | Annotations for flyteagent pods | diff --git a/charts/flyteagent/values.yaml b/charts/flyteagent/values.yaml index ce23995df0..845248af90 100755 --- a/charts/flyteagent/values.yaml +++ b/charts/flyteagent/values.yaml @@ -23,7 +23,7 @@ image: # -- Docker image for flyteagent deployment repository: cr.flyte.org/flyteorg/flyteagent # FLYTEAGENT_IMAGE # -- Docker image tag - tag: 1.13.3 # FLYTEAGENT_TAG + tag: 1.13.4 # FLYTEAGENT_TAG # -- Docker image pull policy pullPolicy: IfNotPresent ports: diff --git a/deployment/agent/flyte_agent_helm_generated.yaml b/deployment/agent/flyte_agent_helm_generated.yaml index c8244070d5..53fe9016c2 100644 --- a/deployment/agent/flyte_agent_helm_generated.yaml +++ b/deployment/agent/flyte_agent_helm_generated.yaml @@ -79,7 +79,7 @@ spec: - pyflyte - serve - agent - image: "cr.flyte.org/flyteorg/flyteagent:1.13.3" + image: "cr.flyte.org/flyteorg/flyteagent:1.13.4" imagePullPolicy: "IfNotPresent" name: flyteagent volumeMounts: diff --git a/deployment/eks/flyte_aws_scheduler_helm_generated.yaml b/deployment/eks/flyte_aws_scheduler_helm_generated.yaml index 7e2f9fa395..2468bf049c 100644 --- a/deployment/eks/flyte_aws_scheduler_helm_generated.yaml +++ b/deployment/eks/flyte_aws_scheduler_helm_generated.yaml @@ -430,7 +430,7 @@ data: plugins: k8s: co-pilot: - image: cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc0 + image: cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc1 name: flyte-copilot- start-timeout: 30s core.yaml: | @@ -876,7 +876,7 @@ spec: - /etc/flyte/config/*.yaml - migrate - run - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: run-migrations securityContext: @@ -897,7 +897,7 @@ spec: - flytesnacks - flytetester - flyteexamples - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: seed-projects securityContext: @@ -915,7 +915,7 @@ spec: - /etc/flyte/config/*.yaml - clusterresource - sync - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: sync-cluster-resources securityContext: @@ -932,7 +932,7 @@ spec: - mountPath: /etc/secrets/ name: admin-secrets - name: generate-secrets - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" command: ["/bin/sh", "-c"] args: @@ -959,7 +959,7 @@ spec: - --config - /etc/flyte/config/*.yaml - serve - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: flyteadmin ports: @@ -1066,7 +1066,7 @@ spec: - /etc/flyte/config/*.yaml - clusterresource - run - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: sync-cluster-resources volumeMounts: @@ -1196,7 +1196,7 @@ spec: - /etc/datacatalog/config/*.yaml - migrate - run - image: "cr.flyte.org/flyteorg/datacatalog:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/datacatalog:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: run-migrations volumeMounts: @@ -1214,7 +1214,7 @@ spec: - --config - /etc/datacatalog/config/*.yaml - serve - image: 
"cr.flyte.org/flyteorg/datacatalog:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/datacatalog:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: datacatalog ports: @@ -1277,7 +1277,7 @@ spec: template: metadata: annotations: - configChecksum: "de81ec89079e3abbbc351f1e9dd5f918ac37e7a302dfe32a4ce4da1083980cd" + configChecksum: "ea7955d463445fa62b4096a37caba7b9049bbbaf909a75ccb91c8bfbc35178b" prometheus.io/path: "/metrics" prometheus.io/port: "10254" labels: @@ -1305,7 +1305,7 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace - image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: flytepropeller ports: @@ -1359,9 +1359,9 @@ spec: labels: app: flyte-pod-webhook app.kubernetes.io/name: flyte-pod-webhook - app.kubernetes.io/version: v1.13.1-rc0 + app.kubernetes.io/version: v1.13.1-rc1 annotations: - configChecksum: "de81ec89079e3abbbc351f1e9dd5f918ac37e7a302dfe32a4ce4da1083980cd" + configChecksum: "ea7955d463445fa62b4096a37caba7b9049bbbaf909a75ccb91c8bfbc35178b" prometheus.io/path: "/metrics" prometheus.io/port: "10254" spec: @@ -1375,7 +1375,7 @@ spec: serviceAccountName: flyte-pod-webhook initContainers: - name: generate-secrets - image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" command: - flytepropeller @@ -1402,7 +1402,7 @@ spec: mountPath: /etc/flyte/config containers: - name: webhook - image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" command: - flytepropeller diff --git a/deployment/eks/flyte_helm_controlplane_generated.yaml b/deployment/eks/flyte_helm_controlplane_generated.yaml index b6fd465b34..4f98e96224 100644 --- a/deployment/eks/flyte_helm_controlplane_generated.yaml +++ b/deployment/eks/flyte_helm_controlplane_generated.yaml @@ -581,7 +581,7 @@ spec: - /etc/flyte/config/*.yaml - migrate - run - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: run-migrations securityContext: @@ -602,7 +602,7 @@ spec: - flytesnacks - flytetester - flyteexamples - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: seed-projects securityContext: @@ -620,7 +620,7 @@ spec: - /etc/flyte/config/*.yaml - clusterresource - sync - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: sync-cluster-resources securityContext: @@ -637,7 +637,7 @@ spec: - mountPath: /etc/secrets/ name: admin-secrets - name: generate-secrets - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" command: ["/bin/sh", "-c"] args: @@ -664,7 +664,7 @@ spec: - --config - /etc/flyte/config/*.yaml - serve - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: flyteadmin ports: @@ -771,7 +771,7 @@ spec: - /etc/flyte/config/*.yaml - clusterresource - run - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: sync-cluster-resources volumeMounts: @@ -901,7 +901,7 @@ spec: - 
/etc/datacatalog/config/*.yaml - migrate - run - image: "cr.flyte.org/flyteorg/datacatalog:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/datacatalog:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: run-migrations volumeMounts: @@ -919,7 +919,7 @@ spec: - --config - /etc/datacatalog/config/*.yaml - serve - image: "cr.flyte.org/flyteorg/datacatalog:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/datacatalog:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: datacatalog ports: @@ -1002,7 +1002,7 @@ spec: - precheck - --config - /etc/flyte/config/*.yaml - image: "cr.flyte.org/flyteorg/flytescheduler:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flytescheduler:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: flytescheduler-check securityContext: @@ -1022,7 +1022,7 @@ spec: - run - --config - /etc/flyte/config/*.yaml - image: "cr.flyte.org/flyteorg/flytescheduler:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flytescheduler:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: flytescheduler ports: diff --git a/deployment/eks/flyte_helm_dataplane_generated.yaml b/deployment/eks/flyte_helm_dataplane_generated.yaml index 421c16dae3..d4780f1f25 100644 --- a/deployment/eks/flyte_helm_dataplane_generated.yaml +++ b/deployment/eks/flyte_helm_dataplane_generated.yaml @@ -94,7 +94,7 @@ data: plugins: k8s: co-pilot: - image: cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc0 + image: cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc1 name: flyte-copilot- start-timeout: 30s core.yaml: | @@ -428,7 +428,7 @@ spec: template: metadata: annotations: - configChecksum: "de81ec89079e3abbbc351f1e9dd5f918ac37e7a302dfe32a4ce4da1083980cd" + configChecksum: "ea7955d463445fa62b4096a37caba7b9049bbbaf909a75ccb91c8bfbc35178b" prometheus.io/path: "/metrics" prometheus.io/port: "10254" labels: @@ -456,7 +456,7 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace - image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: flytepropeller ports: @@ -510,9 +510,9 @@ spec: labels: app: flyte-pod-webhook app.kubernetes.io/name: flyte-pod-webhook - app.kubernetes.io/version: v1.13.1-rc0 + app.kubernetes.io/version: v1.13.1-rc1 annotations: - configChecksum: "de81ec89079e3abbbc351f1e9dd5f918ac37e7a302dfe32a4ce4da1083980cd" + configChecksum: "ea7955d463445fa62b4096a37caba7b9049bbbaf909a75ccb91c8bfbc35178b" prometheus.io/path: "/metrics" prometheus.io/port: "10254" spec: @@ -526,7 +526,7 @@ spec: serviceAccountName: flyte-pod-webhook initContainers: - name: generate-secrets - image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" command: - flytepropeller @@ -553,7 +553,7 @@ spec: mountPath: /etc/flyte/config containers: - name: webhook - image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" command: - flytepropeller diff --git a/deployment/eks/flyte_helm_generated.yaml b/deployment/eks/flyte_helm_generated.yaml index 1e9d9a5bf1..db89ef2cf0 100644 --- a/deployment/eks/flyte_helm_generated.yaml +++ b/deployment/eks/flyte_helm_generated.yaml @@ -461,7 +461,7 @@ data: plugins: k8s: co-pilot: - image: cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc0 + image: cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc1 name: flyte-copilot- start-timeout: 30s core.yaml: | @@ -907,7 +907,7 @@ spec: - /etc/flyte/config/*.yaml - migrate - run - image: 
"cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: run-migrations securityContext: @@ -928,7 +928,7 @@ spec: - flytesnacks - flytetester - flyteexamples - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: seed-projects securityContext: @@ -946,7 +946,7 @@ spec: - /etc/flyte/config/*.yaml - clusterresource - sync - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: sync-cluster-resources securityContext: @@ -963,7 +963,7 @@ spec: - mountPath: /etc/secrets/ name: admin-secrets - name: generate-secrets - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" command: ["/bin/sh", "-c"] args: @@ -990,7 +990,7 @@ spec: - --config - /etc/flyte/config/*.yaml - serve - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: flyteadmin ports: @@ -1097,7 +1097,7 @@ spec: - /etc/flyte/config/*.yaml - clusterresource - run - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: sync-cluster-resources volumeMounts: @@ -1227,7 +1227,7 @@ spec: - /etc/datacatalog/config/*.yaml - migrate - run - image: "cr.flyte.org/flyteorg/datacatalog:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/datacatalog:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: run-migrations volumeMounts: @@ -1245,7 +1245,7 @@ spec: - --config - /etc/datacatalog/config/*.yaml - serve - image: "cr.flyte.org/flyteorg/datacatalog:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/datacatalog:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: datacatalog ports: @@ -1328,7 +1328,7 @@ spec: - precheck - --config - /etc/flyte/config/*.yaml - image: "cr.flyte.org/flyteorg/flytescheduler:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flytescheduler:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: flytescheduler-check securityContext: @@ -1348,7 +1348,7 @@ spec: - run - --config - /etc/flyte/config/*.yaml - image: "cr.flyte.org/flyteorg/flytescheduler:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flytescheduler:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: flytescheduler ports: @@ -1407,7 +1407,7 @@ spec: template: metadata: annotations: - configChecksum: "de81ec89079e3abbbc351f1e9dd5f918ac37e7a302dfe32a4ce4da1083980cd" + configChecksum: "ea7955d463445fa62b4096a37caba7b9049bbbaf909a75ccb91c8bfbc35178b" prometheus.io/path: "/metrics" prometheus.io/port: "10254" labels: @@ -1435,7 +1435,7 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace - image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: flytepropeller ports: @@ -1489,9 +1489,9 @@ spec: labels: app: flyte-pod-webhook app.kubernetes.io/name: flyte-pod-webhook - app.kubernetes.io/version: v1.13.1-rc0 + app.kubernetes.io/version: v1.13.1-rc1 annotations: - configChecksum: "de81ec89079e3abbbc351f1e9dd5f918ac37e7a302dfe32a4ce4da1083980cd" + configChecksum: "ea7955d463445fa62b4096a37caba7b9049bbbaf909a75ccb91c8bfbc35178b" prometheus.io/path: "/metrics" prometheus.io/port: "10254" spec: @@ -1505,7 +1505,7 @@ spec: serviceAccountName: 
flyte-pod-webhook initContainers: - name: generate-secrets - image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" command: - flytepropeller @@ -1532,7 +1532,7 @@ spec: mountPath: /etc/flyte/config containers: - name: webhook - image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" command: - flytepropeller diff --git a/deployment/gcp/flyte_helm_controlplane_generated.yaml b/deployment/gcp/flyte_helm_controlplane_generated.yaml index 7d999c54fa..aa84954510 100644 --- a/deployment/gcp/flyte_helm_controlplane_generated.yaml +++ b/deployment/gcp/flyte_helm_controlplane_generated.yaml @@ -596,7 +596,7 @@ spec: - /etc/flyte/config/*.yaml - migrate - run - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: run-migrations securityContext: @@ -617,7 +617,7 @@ spec: - flytesnacks - flytetester - flyteexamples - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: seed-projects securityContext: @@ -635,7 +635,7 @@ spec: - /etc/flyte/config/*.yaml - clusterresource - sync - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: sync-cluster-resources securityContext: @@ -652,7 +652,7 @@ spec: - mountPath: /etc/secrets/ name: admin-secrets - name: generate-secrets - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" command: ["/bin/sh", "-c"] args: @@ -679,7 +679,7 @@ spec: - --config - /etc/flyte/config/*.yaml - serve - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: flyteadmin ports: @@ -786,7 +786,7 @@ spec: - /etc/flyte/config/*.yaml - clusterresource - run - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: sync-cluster-resources volumeMounts: @@ -916,7 +916,7 @@ spec: - /etc/datacatalog/config/*.yaml - migrate - run - image: "cr.flyte.org/flyteorg/datacatalog:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/datacatalog:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: run-migrations volumeMounts: @@ -934,7 +934,7 @@ spec: - --config - /etc/datacatalog/config/*.yaml - serve - image: "cr.flyte.org/flyteorg/datacatalog:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/datacatalog:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: datacatalog ports: @@ -1017,7 +1017,7 @@ spec: - precheck - --config - /etc/flyte/config/*.yaml - image: "cr.flyte.org/flyteorg/flytescheduler:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flytescheduler:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: flytescheduler-check securityContext: @@ -1037,7 +1037,7 @@ spec: - run - --config - /etc/flyte/config/*.yaml - image: "cr.flyte.org/flyteorg/flytescheduler:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flytescheduler:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: flytescheduler ports: diff --git a/deployment/gcp/flyte_helm_dataplane_generated.yaml b/deployment/gcp/flyte_helm_dataplane_generated.yaml index a189c612a2..3d8f70b15b 100644 --- 
a/deployment/gcp/flyte_helm_dataplane_generated.yaml +++ b/deployment/gcp/flyte_helm_dataplane_generated.yaml @@ -94,7 +94,7 @@ data: plugins: k8s: co-pilot: - image: cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc0 + image: cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc1 name: flyte-copilot- start-timeout: 30s core.yaml: | @@ -436,7 +436,7 @@ spec: template: metadata: annotations: - configChecksum: "407e317f4b09b0311b506d7bf60e23bd6e98acd501f0301c2d78d71ea108983" + configChecksum: "f652cb79ec4760da1c900d5bf0c530a8d05b34380fc1be967080d35945bc3b1" prometheus.io/path: "/metrics" prometheus.io/port: "10254" labels: @@ -463,7 +463,7 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace - image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: flytepropeller ports: @@ -517,9 +517,9 @@ spec: labels: app: flyte-pod-webhook app.kubernetes.io/name: flyte-pod-webhook - app.kubernetes.io/version: v1.13.1-rc0 + app.kubernetes.io/version: v1.13.1-rc1 annotations: - configChecksum: "407e317f4b09b0311b506d7bf60e23bd6e98acd501f0301c2d78d71ea108983" + configChecksum: "f652cb79ec4760da1c900d5bf0c530a8d05b34380fc1be967080d35945bc3b1" prometheus.io/path: "/metrics" prometheus.io/port: "10254" spec: @@ -533,7 +533,7 @@ spec: serviceAccountName: flyte-pod-webhook initContainers: - name: generate-secrets - image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" command: - flytepropeller @@ -560,7 +560,7 @@ spec: mountPath: /etc/flyte/config containers: - name: webhook - image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" command: - flytepropeller diff --git a/deployment/gcp/flyte_helm_generated.yaml b/deployment/gcp/flyte_helm_generated.yaml index c51e6972ce..9f501cf916 100644 --- a/deployment/gcp/flyte_helm_generated.yaml +++ b/deployment/gcp/flyte_helm_generated.yaml @@ -474,7 +474,7 @@ data: plugins: k8s: co-pilot: - image: cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc0 + image: cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc1 name: flyte-copilot- start-timeout: 30s core.yaml: | @@ -930,7 +930,7 @@ spec: - /etc/flyte/config/*.yaml - migrate - run - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: run-migrations securityContext: @@ -951,7 +951,7 @@ spec: - flytesnacks - flytetester - flyteexamples - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: seed-projects securityContext: @@ -969,7 +969,7 @@ spec: - /etc/flyte/config/*.yaml - clusterresource - sync - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: sync-cluster-resources securityContext: @@ -986,7 +986,7 @@ spec: - mountPath: /etc/secrets/ name: admin-secrets - name: generate-secrets - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" command: ["/bin/sh", "-c"] args: @@ -1013,7 +1013,7 @@ spec: - --config - /etc/flyte/config/*.yaml - serve - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" 
name: flyteadmin ports: @@ -1120,7 +1120,7 @@ spec: - /etc/flyte/config/*.yaml - clusterresource - run - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: sync-cluster-resources volumeMounts: @@ -1250,7 +1250,7 @@ spec: - /etc/datacatalog/config/*.yaml - migrate - run - image: "cr.flyte.org/flyteorg/datacatalog:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/datacatalog:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: run-migrations volumeMounts: @@ -1268,7 +1268,7 @@ spec: - --config - /etc/datacatalog/config/*.yaml - serve - image: "cr.flyte.org/flyteorg/datacatalog:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/datacatalog:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: datacatalog ports: @@ -1351,7 +1351,7 @@ spec: - precheck - --config - /etc/flyte/config/*.yaml - image: "cr.flyte.org/flyteorg/flytescheduler:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flytescheduler:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: flytescheduler-check securityContext: @@ -1371,7 +1371,7 @@ spec: - run - --config - /etc/flyte/config/*.yaml - image: "cr.flyte.org/flyteorg/flytescheduler:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flytescheduler:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: flytescheduler ports: @@ -1430,7 +1430,7 @@ spec: template: metadata: annotations: - configChecksum: "407e317f4b09b0311b506d7bf60e23bd6e98acd501f0301c2d78d71ea108983" + configChecksum: "f652cb79ec4760da1c900d5bf0c530a8d05b34380fc1be967080d35945bc3b1" prometheus.io/path: "/metrics" prometheus.io/port: "10254" labels: @@ -1457,7 +1457,7 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace - image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: flytepropeller ports: @@ -1511,9 +1511,9 @@ spec: labels: app: flyte-pod-webhook app.kubernetes.io/name: flyte-pod-webhook - app.kubernetes.io/version: v1.13.1-rc0 + app.kubernetes.io/version: v1.13.1-rc1 annotations: - configChecksum: "407e317f4b09b0311b506d7bf60e23bd6e98acd501f0301c2d78d71ea108983" + configChecksum: "f652cb79ec4760da1c900d5bf0c530a8d05b34380fc1be967080d35945bc3b1" prometheus.io/path: "/metrics" prometheus.io/port: "10254" spec: @@ -1527,7 +1527,7 @@ spec: serviceAccountName: flyte-pod-webhook initContainers: - name: generate-secrets - image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" command: - flytepropeller @@ -1554,7 +1554,7 @@ spec: mountPath: /etc/flyte/config containers: - name: webhook - image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" command: - flytepropeller diff --git a/deployment/sandbox-binary/flyte_sandbox_binary_helm_generated.yaml b/deployment/sandbox-binary/flyte_sandbox_binary_helm_generated.yaml index 74a37957ea..ef73b3f145 100644 --- a/deployment/sandbox-binary/flyte_sandbox_binary_helm_generated.yaml +++ b/deployment/sandbox-binary/flyte_sandbox_binary_helm_generated.yaml @@ -116,7 +116,7 @@ data: stackdriver-enabled: false k8s: co-pilot: - image: "cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc1" k8s-array: logs: config: @@ -359,7 +359,7 @@ spec: app.kubernetes.io/instance: flyte app.kubernetes.io/component: flyte-binary annotations: - checksum/configuration: 
58c26a7a95c9edce075726e132dac345f0aafb69dea2b21f6445dc2615ee61fe + checksum/configuration: 2761c022d974c8ebb20ba44c0363cd80e46e2afd24ea916ebfe6b0f242f0418e checksum/configuration-secret: d5d93f4e67780b21593dc3799f0f6682aab0765e708e4020939975d14d44f929 checksum/cluster-resource-templates: 7dfa59f3d447e9c099b8f8ffad3af466fecbc9cf9f8c97295d9634254a55d4ae spec: diff --git a/deployment/sandbox/flyte_helm_generated.yaml b/deployment/sandbox/flyte_helm_generated.yaml index 28ab47df17..f1f71817b6 100644 --- a/deployment/sandbox/flyte_helm_generated.yaml +++ b/deployment/sandbox/flyte_helm_generated.yaml @@ -586,7 +586,7 @@ data: plugins: k8s: co-pilot: - image: cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc0 + image: cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc1 name: flyte-copilot- start-timeout: 30s core.yaml: | @@ -6714,7 +6714,7 @@ spec: - /etc/flyte/config/*.yaml - migrate - run - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: run-migrations securityContext: @@ -6734,7 +6734,7 @@ spec: - flytesnacks - flytetester - flyteexamples - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: seed-projects securityContext: @@ -6751,7 +6751,7 @@ spec: - /etc/flyte/config/*.yaml - clusterresource - sync - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: sync-cluster-resources securityContext: @@ -6767,7 +6767,7 @@ spec: - mountPath: /etc/secrets/ name: admin-secrets - name: generate-secrets - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" command: ["/bin/sh", "-c"] args: @@ -6794,7 +6794,7 @@ spec: - --config - /etc/flyte/config/*.yaml - serve - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: flyteadmin ports: @@ -6891,7 +6891,7 @@ spec: - /etc/flyte/config/*.yaml - clusterresource - run - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: sync-cluster-resources volumeMounts: @@ -7016,7 +7016,7 @@ spec: - /etc/datacatalog/config/*.yaml - migrate - run - image: "cr.flyte.org/flyteorg/datacatalog:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/datacatalog:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: run-migrations volumeMounts: @@ -7033,7 +7033,7 @@ spec: - --config - /etc/datacatalog/config/*.yaml - serve - image: "cr.flyte.org/flyteorg/datacatalog:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/datacatalog:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: datacatalog ports: @@ -7106,7 +7106,7 @@ spec: - precheck - --config - /etc/flyte/config/*.yaml - image: "cr.flyte.org/flyteorg/flytescheduler:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flytescheduler:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: flytescheduler-check securityContext: @@ -7125,7 +7125,7 @@ spec: - run - --config - /etc/flyte/config/*.yaml - image: "cr.flyte.org/flyteorg/flytescheduler:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flytescheduler:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: flytescheduler ports: @@ -7181,7 +7181,7 @@ spec: template: metadata: annotations: - configChecksum: 
"87f8dd83145c058839fbf440c688d131d5917282ae935b2fe02147df47ef3a7" + configChecksum: "b6a325d0de65783cfab97909bc7202fef4c3efc85edd2b95b5076e3681938f1" prometheus.io/path: "/metrics" prometheus.io/port: "10254" labels: @@ -7208,7 +7208,7 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace - image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: flytepropeller ports: @@ -7255,9 +7255,9 @@ spec: labels: app: flyte-pod-webhook app.kubernetes.io/name: flyte-pod-webhook - app.kubernetes.io/version: v1.13.1-rc0 + app.kubernetes.io/version: v1.13.1-rc1 annotations: - configChecksum: "87f8dd83145c058839fbf440c688d131d5917282ae935b2fe02147df47ef3a7" + configChecksum: "b6a325d0de65783cfab97909bc7202fef4c3efc85edd2b95b5076e3681938f1" prometheus.io/path: "/metrics" prometheus.io/port: "10254" spec: @@ -7271,7 +7271,7 @@ spec: serviceAccountName: flyte-pod-webhook initContainers: - name: generate-secrets - image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" command: - flytepropeller @@ -7298,7 +7298,7 @@ spec: mountPath: /etc/flyte/config containers: - name: webhook - image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" command: - flytepropeller diff --git a/docker/sandbox-bundled/manifests/complete-agent.yaml b/docker/sandbox-bundled/manifests/complete-agent.yaml index 9e10ae09fb..3ba7075df8 100644 --- a/docker/sandbox-bundled/manifests/complete-agent.yaml +++ b/docker/sandbox-bundled/manifests/complete-agent.yaml @@ -469,7 +469,7 @@ data: stackdriver-enabled: false k8s: co-pilot: - image: "cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc1" k8s-array: logs: config: @@ -817,7 +817,7 @@ type: Opaque --- apiVersion: v1 data: - haSharedSecret: cEJlTDl0bXN6NVE4ZEdFag== + haSharedSecret: dkJZdnpKQ0FYZkhWano2eg== proxyPassword: "" proxyUsername: "" kind: Secret @@ -1248,7 +1248,7 @@ spec: metadata: annotations: checksum/cluster-resource-templates: 6fd9b172465e3089fcc59f738b92b8dc4d8939360c19de8ee65f68b0e7422035 - checksum/configuration: 9207564b9b5f0358f7b8507232200ac759f58ae16af8561f72a4488274629eaf + checksum/configuration: 12c484f191527a693debafaa71bfcd04dbda7bfc87c83e385ea6d5c13188401f checksum/configuration-secret: 09216ffaa3d29e14f88b1f30af580d02a2a5e014de4d750b7f275cc07ed4e914 labels: app.kubernetes.io/component: flyte-binary @@ -1414,7 +1414,7 @@ spec: metadata: annotations: checksum/config: 8f50e768255a87f078ba8b9879a0c174c3e045ffb46ac8723d2eedbe293c8d81 - checksum/secret: ad16b8a2ae1014673d354d27f8f9e4307e588f439534e5894ecb7b0e4c8c7692 + checksum/secret: 4c91f059d15ecfed81f3906fb24896c41fde9103a61ace577327d080409423da labels: app: docker-registry release: flyte-sandbox @@ -1757,7 +1757,7 @@ spec: value: minio - name: FLYTE_AWS_SECRET_ACCESS_KEY value: miniostorage - image: cr.flyte.org/flyteorg/flyteagent:1.13.3 + image: cr.flyte.org/flyteorg/flyteagent:1.13.4 imagePullPolicy: IfNotPresent name: flyteagent ports: diff --git a/docker/sandbox-bundled/manifests/complete.yaml b/docker/sandbox-bundled/manifests/complete.yaml index ea327339fb..5c470d20cf 100644 --- a/docker/sandbox-bundled/manifests/complete.yaml +++ b/docker/sandbox-bundled/manifests/complete.yaml @@ -458,7 +458,7 @@ data: stackdriver-enabled: false k8s: co-pilot: - image: 
"cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc1" k8s-array: logs: config: @@ -797,7 +797,7 @@ type: Opaque --- apiVersion: v1 data: - haSharedSecret: UFppYnRpOVNHMVdlZkp0TA== + haSharedSecret: WGJQZFpzb2ZDSkU5dmJReQ== proxyPassword: "" proxyUsername: "" kind: Secret @@ -1195,7 +1195,7 @@ spec: metadata: annotations: checksum/cluster-resource-templates: 6fd9b172465e3089fcc59f738b92b8dc4d8939360c19de8ee65f68b0e7422035 - checksum/configuration: 6bc1ee22a1eb899398b82b56862cfb1aa09ed96f467d4eae11f2738c284115c2 + checksum/configuration: 0c0c4c2401e4d6362921a86660489536d0db8e4e66ae09e429adc54665c68021 checksum/configuration-secret: 09216ffaa3d29e14f88b1f30af580d02a2a5e014de4d750b7f275cc07ed4e914 labels: app.kubernetes.io/component: flyte-binary @@ -1361,7 +1361,7 @@ spec: metadata: annotations: checksum/config: 8f50e768255a87f078ba8b9879a0c174c3e045ffb46ac8723d2eedbe293c8d81 - checksum/secret: bfe262b4ef6c387db539e0d2b93d9557907a4a4b5aef3cec954b1ce593d364d9 + checksum/secret: 20145f8b7e37f104163904f86eeb0a46444c157de19f8e675128b04d16598ee4 labels: app: docker-registry release: flyte-sandbox diff --git a/docker/sandbox-bundled/manifests/dev.yaml b/docker/sandbox-bundled/manifests/dev.yaml index 1f55acef66..787be05725 100644 --- a/docker/sandbox-bundled/manifests/dev.yaml +++ b/docker/sandbox-bundled/manifests/dev.yaml @@ -499,7 +499,7 @@ metadata: --- apiVersion: v1 data: - haSharedSecret: MEV1QmRqTlVpVHljaU9FeQ== + haSharedSecret: R2RwSGJNOERJN2NSWXNQNg== proxyPassword: "" proxyUsername: "" kind: Secret @@ -934,7 +934,7 @@ spec: metadata: annotations: checksum/config: 8f50e768255a87f078ba8b9879a0c174c3e045ffb46ac8723d2eedbe293c8d81 - checksum/secret: 9ac72b0cb595456c3e96447f44a0377762ab17d663e80e47079203fcbd518a34 + checksum/secret: a110328cf7fce9dfe57fe25438d4902fc3cc661346782bb261c0b6b80fb783d1 labels: app: docker-registry release: flyte-sandbox diff --git a/docs/conf.py b/docs/conf.py index 35b21a09ca..992b62f91f 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -36,7 +36,7 @@ # The short X.Y version version = "" # The full version, including alpha/beta/rc tags -release = "1.13.1-rc0" +release = "1.13.1-rc1" # -- General configuration --------------------------------------------------- diff --git a/docs/deployment/configuration/generated/flyteadmin_config.rst b/docs/deployment/configuration/generated/flyteadmin_config.rst index 162cbc4d1d..0912738015 100644 --- a/docs/deployment/configuration/generated/flyteadmin_config.rst +++ b/docs/deployment/configuration/generated/flyteadmin_config.rst @@ -2756,6 +2756,8 @@ k8s (`config.K8sPluginConfig`_) resource-tolerations: null scheduler-name: "" send-object-events: false + update-backoff-retries: 5 + update-base-backoff-duration: 10 catalog.Config @@ -3228,6 +3230,30 @@ If true, will send k8s object events in TaskExecutionEvent updates. "false" +update-base-backoff-duration (int) +"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""" + +Initial delay in exponential backoff when updating a resource in milliseconds. + +**Default Value**: + +.. code-block:: yaml + + "10" + + +update-backoff-retries (int) +"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""" + +Number of retries for exponential backoff when updating a resource. + +**Default Value**: + +.. 
code-block:: yaml + + "5" + + config.FlyteCoPilotConfig ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -4058,6 +4084,16 @@ Whether output data should be sent by reference when it is too large to be sent "false" +ErrorOnAlreadyExists (bool) +"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""" + +**Default Value**: + +.. code-block:: yaml + + "false" + + config.KubeClientConfig ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/docs/deployment/configuration/generated/flytepropeller_config.rst b/docs/deployment/configuration/generated/flytepropeller_config.rst index a09a4b1e91..be6f7ee7f0 100644 --- a/docs/deployment/configuration/generated/flytepropeller_config.rst +++ b/docs/deployment/configuration/generated/flytepropeller_config.rst @@ -1211,6 +1211,8 @@ k8s (`config.K8sPluginConfig`_) resource-tolerations: null scheduler-name: "" send-object-events: false + update-backoff-retries: 5 + update-base-backoff-duration: 10 k8s-array (`k8s.Config`_) @@ -2712,6 +2714,30 @@ If true, will send k8s object events in TaskExecutionEvent updates. "false" +update-base-backoff-duration (int) +"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""" + +Initial delay in exponential backoff when updating a resource in milliseconds. + +**Default Value**: + +.. code-block:: yaml + + "10" + + +update-backoff-retries (int) +"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""" + +Number of retries for exponential backoff when updating a resource. + +**Default Value**: + +.. code-block:: yaml + + "5" + + config.FlyteCoPilotConfig ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -4896,6 +4922,16 @@ Whether output data should be sent by reference when it is too large to be sent "false" +ErrorOnAlreadyExists (bool) +"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""" + +**Default Value**: + +.. code-block:: yaml + + "false" + + config.KubeClientConfig ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/docs/deployment/configuration/generated/scheduler_config.rst b/docs/deployment/configuration/generated/scheduler_config.rst index 923e3db898..98ff1ee343 100644 --- a/docs/deployment/configuration/generated/scheduler_config.rst +++ b/docs/deployment/configuration/generated/scheduler_config.rst @@ -2756,6 +2756,8 @@ k8s (`config.K8sPluginConfig`_) resource-tolerations: null scheduler-name: "" send-object-events: false + update-backoff-retries: 5 + update-base-backoff-duration: 10 catalog.Config @@ -3228,6 +3230,30 @@ If true, will send k8s object events in TaskExecutionEvent updates. "false" +update-base-backoff-duration (int) +"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""" + +Initial delay in exponential backoff when updating a resource in milliseconds. + +**Default Value**: + +.. 
code-block:: yaml + + "10" + + +update-backoff-retries (int) +"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""" + +Number of retries for exponential backoff when updating a resource. + +**Default Value**: + +.. code-block:: yaml + + "5" + + config.FlyteCoPilotConfig ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -4058,6 +4084,16 @@ Whether output data should be sent by reference when it is too large to be sent "false" +ErrorOnAlreadyExists (bool) +"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""" + +**Default Value**: + +.. code-block:: yaml + + "false" + + config.KubeClientConfig ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ From 7d59f106db997ab22686b1b414228fe323934c48 Mon Sep 17 00:00:00 2001 From: Yee Hing Tong Date: Sat, 24 Aug 2024 10:24:13 -0700 Subject: [PATCH 8/8] [flytectl] DataConfig missing from TaskSpec (#5692) Signed-off-by: Yee Hing Tong --- flytectl/cmd/register/register_util.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/flytectl/cmd/register/register_util.go b/flytectl/cmd/register/register_util.go index 0bf5a23d49..b7b419e611 100644 --- a/flytectl/cmd/register/register_util.go +++ b/flytectl/cmd/register/register_util.go @@ -339,8 +339,9 @@ func hydrateTaskSpec(task *admin.TaskSpec, sourceUploadedLocation storage.DataRe } task.Template.Target = &core.TaskTemplate_K8SPod{ K8SPod: &core.K8SPod{ - Metadata: task.Template.GetK8SPod().Metadata, - PodSpec: podSpecStruct, + Metadata: task.Template.GetK8SPod().Metadata, + PodSpec: podSpecStruct, + DataConfig: task.Template.GetK8SPod().DataConfig, }, } }
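
For operators consuming this series: the two new `K8sPluginConfig` knobs documented in the generated RST above can be overridden in a FlytePropeller configuration file. A minimal sketch follows; the key names and defaults are taken verbatim from the docs diffs above, while the `plugins.k8s` nesting is an assumption based on how the co-pilot block is laid out in the generated configmaps in this series.

```yaml
# Sketch only, not an authoritative config: overriding the new
# update-backoff knobs. Key names and defaults come from the generated
# docs above; the plugins.k8s placement is assumed from the co-pilot
# block in the generated configmaps.
plugins:
  k8s:
    # Initial delay in exponential backoff when updating a resource, in milliseconds.
    update-base-backoff-duration: 10
    # Number of retries for exponential backoff when updating a resource.
    update-backoff-retries: 5
```

With the defaults shown, a conflicting resource update is retried with exponential backoff up to 5 times, starting from a 10 ms delay.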