[RayService] Avoid Duplicate Serve Service (ray-project#1867)
Yicheng-Lu-llll authored Jan 30, 2024
1 parent acafbfe commit edd332b
Showing 5 changed files with 45 additions and 60 deletions.
14 changes: 7 additions & 7 deletions ray-operator/controllers/ray/common/pod.go
@@ -243,7 +243,7 @@ func DefaultWorkerPodTemplate(ctx context.Context, instance rayv1.RayCluster, wo
return podTemplate
}

func initLivenessAndReadinessProbe(rayContainer *corev1.Container, rayNodeType rayv1.RayNodeType, enableServeService bool) {
func initLivenessAndReadinessProbe(rayContainer *corev1.Container, rayNodeType rayv1.RayNodeType, creator string) {
rayAgentRayletHealthCommand := fmt.Sprintf(utils.BaseWgetHealthCommand, utils.DefaultDashboardAgentListenPort, utils.RayAgentRayletHealthPath)
rayDashboardGCSHealthCommand := fmt.Sprintf(utils.BaseWgetHealthCommand, utils.DefaultDashboardPort, utils.RayDashboardGCSHealthPath)

@@ -281,7 +281,7 @@ func initLivenessAndReadinessProbe(rayContainer *corev1.Container, rayNodeType r
// For worker Pods serving traffic, we need to add an additional HTTP proxy health check for the readiness probe.
// Note: the head Pod checks the HTTP proxy's health at every rayservice controller reconcile instead of using a readiness probe.
// See https://github.com/ray-project/kuberay/pull/1808 for reasons.
if enableServeService && rayNodeType == rayv1.WorkerNode {
if strings.EqualFold(creator, string(utils.RayServiceCRD)) && rayNodeType == rayv1.WorkerNode {
rayContainer.ReadinessProbe.FailureThreshold = utils.ServeReadinessProbeFailureThreshold
rayServeProxyHealthCommand := fmt.Sprintf(utils.BaseWgetHealthCommand,
utils.FindContainerPort(rayContainer, utils.ServingPortName, utils.DefaultServingPort), utils.RayServeProxyHealthPath)
@@ -292,14 +292,14 @@ func initLivenessAndReadinessProbe(rayContainer *corev1.Container, rayNodeType r
}
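For context, the exec probes assembled by initLivenessAndReadinessProbe end up looking roughly like the sketch below. It is illustrative only: the literal ports, paths, wget template, and threshold stand in for the utils constants referenced above (DefaultDashboardAgentListenPort, RayAgentRayletHealthPath, DefaultServingPort, RayServeProxyHealthPath, ServeReadinessProbeFailureThreshold) and are not copied verbatim from this diff.

```go
package sketch

import corev1 "k8s.io/api/core/v1"

// workerReadinessProbeSketch approximates the readiness probe for a worker Pod
// whose creator is the RayService CRD: the raylet health check is always
// present, and the serve proxy health check is appended only for such workers.
func workerReadinessProbeSketch() *corev1.Probe {
	rayletHealth := "wget -T 2 -q -O- http://localhost:52365/api/local_raylet_healthz | grep success"
	serveProxyHealth := "wget -T 2 -q -O- http://localhost:8000/-/healthz | grep success"
	return &corev1.Probe{
		ProbeHandler: corev1.ProbeHandler{
			Exec: &corev1.ExecAction{
				Command: []string{"bash", "-c", rayletHealth + " && " + serveProxyHealth},
			},
		},
		// Stand-in for utils.ServeReadinessProbeFailureThreshold.
		FailureThreshold: 10,
	}
}
```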

// BuildPod a pod config
func BuildPod(ctx context.Context, podTemplateSpec corev1.PodTemplateSpec, rayNodeType rayv1.RayNodeType, rayStartParams map[string]string, headPort string, enableRayAutoscaler *bool, creator string, fqdnRayIP string, enableServeService bool) (aPod corev1.Pod) {
func BuildPod(ctx context.Context, podTemplateSpec corev1.PodTemplateSpec, rayNodeType rayv1.RayNodeType, rayStartParams map[string]string, headPort string, enableRayAutoscaler *bool, creator string, fqdnRayIP string) (aPod corev1.Pod) {
log := ctrl.LoggerFrom(ctx)

// For Worker Pod: Traffic readiness is determined by the readiness probe.
// Therefore, the RayClusterServingServiceLabelKey label is not utilized and should always be set to true.
// For Head Pod: Traffic readiness is determined by the value of the RayClusterServingServiceLabelKey label.
// Initially, set the label to false and let the rayservice controller manage its value.
log := ctrl.LoggerFrom(ctx)

if enableServeService {
if strings.EqualFold(creator, string(utils.RayServiceCRD)) {
podTemplateSpec.Labels[utils.RayClusterServingServiceLabelKey] = utils.EnableRayClusterServingServiceTrue
if rayNodeType == rayv1.HeadNode {
podTemplateSpec.Labels[utils.RayClusterServingServiceLabelKey] = utils.EnableRayClusterServingServiceFalse
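Because the head Pod's serving label starts out false, something else must turn it on once the head is actually ready to take traffic. Below is a minimal sketch, not code from this diff, of how the RayService controller could manage that label during reconciliation; updateHeadPodServeLabel and the proxyHealthy input are hypothetical names used only for illustration.

```go
package sketch

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/ray-project/kuberay/ray-operator/controllers/ray/utils"
)

// updateHeadPodServeLabel flips the head Pod's serving label based on the HTTP
// proxy health observed during a RayService reconcile, so the serve Service
// only selects the head Pod once it can actually serve traffic.
func updateHeadPodServeLabel(ctx context.Context, c client.Client, headPod *corev1.Pod, proxyHealthy bool) error {
	desired := utils.EnableRayClusterServingServiceFalse
	if proxyHealthy {
		desired = utils.EnableRayClusterServingServiceTrue
	}
	if headPod.Labels == nil {
		headPod.Labels = map[string]string{}
	}
	if headPod.Labels[utils.RayClusterServingServiceLabelKey] == desired {
		return nil // label already up to date
	}
	headPod.Labels[utils.RayClusterServingServiceLabelKey] = desired
	return c.Update(ctx, headPod)
}
```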
@@ -380,7 +380,7 @@ func BuildPod(ctx context.Context, podTemplateSpec corev1.PodTemplateSpec, rayNo
// Configure the readiness and liveness probes for the Ray container. These probes
// play a crucial role in KubeRay health checks. Without them, certain failures,
// such as the Raylet process crashing, may go undetected.
initLivenessAndReadinessProbe(&pod.Spec.Containers[utils.RayContainerIndex], rayNodeType, enableServeService)
initLivenessAndReadinessProbe(&pod.Spec.Containers[utils.RayContainerIndex], rayNodeType, creator)
}

return pod
74 changes: 34 additions & 40 deletions ray-operator/controllers/ray/common/pod_test.go
@@ -335,11 +335,7 @@ func TestBuildPod(t *testing.T) {
// Test head pod
podName := strings.ToLower(cluster.Name + utils.DashSymbol + string(rayv1.HeadNode) + utils.DashSymbol + utils.FormatInt32(0))
podTemplateSpec := DefaultHeadPodTemplate(ctx, *cluster, cluster.Spec.HeadGroupSpec, podName, "6379")
pod := BuildPod(ctx, podTemplateSpec, rayv1.HeadNode, cluster.Spec.HeadGroupSpec.RayStartParams, "6379", nil, "", "", true)

val, ok := pod.Labels[utils.RayClusterServingServiceLabelKey]
assert.True(t, ok, "Expected serve label is not present")
assert.Equal(t, utils.EnableRayClusterServingServiceFalse, val, "Wrong serve label value")
pod := BuildPod(ctx, podTemplateSpec, rayv1.HeadNode, cluster.Spec.HeadGroupSpec.RayStartParams, "6379", nil, "", "")

// Check environment variables
rayContainer := pod.Spec.Containers[utils.RayContainerIndex]
@@ -392,11 +388,7 @@ func TestBuildPod(t *testing.T) {
podName = cluster.Name + utils.DashSymbol + string(rayv1.WorkerNode) + utils.DashSymbol + worker.GroupName + utils.DashSymbol + utils.FormatInt32(0)
fqdnRayIP := utils.GenerateFQDNServiceName(ctx, *cluster, cluster.Namespace)
podTemplateSpec = DefaultWorkerPodTemplate(ctx, *cluster, worker, podName, fqdnRayIP, "6379")
pod = BuildPod(ctx, podTemplateSpec, rayv1.WorkerNode, worker.RayStartParams, "6379", nil, "", fqdnRayIP, true)

val, ok = pod.Labels[utils.RayClusterServingServiceLabelKey]
assert.True(t, ok, "Expected serve label is not present")
assert.Equal(t, utils.EnableRayClusterServingServiceTrue, val, "Wrong serve label value")
pod = BuildPod(ctx, podTemplateSpec, rayv1.WorkerNode, worker.RayStartParams, "6379", nil, "", fqdnRayIP)

// Check environment variables
rayContainer = pod.Spec.Containers[utils.RayContainerIndex]
@@ -435,7 +427,7 @@ func TestBuildPod_WithOverwriteCommand(t *testing.T) {

podName := strings.ToLower(cluster.Name + utils.DashSymbol + string(rayv1.HeadNode) + utils.DashSymbol + utils.FormatInt32(0))
podTemplateSpec := DefaultHeadPodTemplate(ctx, *cluster, cluster.Spec.HeadGroupSpec, podName, "6379")
headPod := BuildPod(ctx, podTemplateSpec, rayv1.HeadNode, cluster.Spec.HeadGroupSpec.RayStartParams, "6379", nil, "", "", false)
headPod := BuildPod(ctx, podTemplateSpec, rayv1.HeadNode, cluster.Spec.HeadGroupSpec.RayStartParams, "6379", nil, "", "")
headContainer := headPod.Spec.Containers[utils.RayContainerIndex]
assert.Equal(t, headContainer.Command, []string{"I am head"})
assert.Equal(t, headContainer.Args, []string{"I am head again"})
@@ -444,7 +436,7 @@ func TestBuildPod_WithOverwriteCommand(t *testing.T) {
podName = cluster.Name + utils.DashSymbol + string(rayv1.WorkerNode) + utils.DashSymbol + worker.GroupName + utils.DashSymbol + utils.FormatInt32(0)
fqdnRayIP := utils.GenerateFQDNServiceName(ctx, *cluster, cluster.Namespace)
podTemplateSpec = DefaultWorkerPodTemplate(ctx, *cluster, worker, podName, fqdnRayIP, "6379")
workerPod := BuildPod(ctx, podTemplateSpec, rayv1.WorkerNode, worker.RayStartParams, "6379", nil, "", fqdnRayIP, false)
workerPod := BuildPod(ctx, podTemplateSpec, rayv1.WorkerNode, worker.RayStartParams, "6379", nil, "", fqdnRayIP)
workerContainer := workerPod.Spec.Containers[utils.RayContainerIndex]
assert.Equal(t, workerContainer.Command, []string{"I am worker"})
assert.Equal(t, workerContainer.Args, []string{"I am worker again"})
@@ -456,7 +448,7 @@ func TestBuildPod_WithAutoscalerEnabled(t *testing.T) {
cluster.Spec.EnableInTreeAutoscaling = &trueFlag
podName := strings.ToLower(cluster.Name + utils.DashSymbol + string(rayv1.HeadNode) + utils.DashSymbol + utils.FormatInt32(0))
podTemplateSpec := DefaultHeadPodTemplate(ctx, *cluster, cluster.Spec.HeadGroupSpec, podName, "6379")
pod := BuildPod(ctx, podTemplateSpec, rayv1.HeadNode, cluster.Spec.HeadGroupSpec.RayStartParams, "6379", &trueFlag, "", "", false)
pod := BuildPod(ctx, podTemplateSpec, rayv1.HeadNode, cluster.Spec.HeadGroupSpec.RayStartParams, "6379", &trueFlag, "", "")

actualResult := pod.Labels[utils.RayClusterLabelKey]
expectedResult := cluster.Name
@@ -507,29 +499,29 @@ func TestBuildPod_WithAutoscalerEnabled(t *testing.T) {
}

func TestBuildPod_WithCreatedByRayService(t *testing.T) {
ctx := context.Background()

cluster := instance.DeepCopy()
cluster.Spec.EnableInTreeAutoscaling = &trueFlag
podName := strings.ToLower(cluster.Name + utils.DashSymbol + string(rayv1.HeadNode) + utils.DashSymbol + utils.FormatInt32(0))
podTemplateSpec := DefaultHeadPodTemplate(context.Background(), *cluster, cluster.Spec.HeadGroupSpec, podName, "6379")
pod := BuildPod(context.Background(), podTemplateSpec, rayv1.HeadNode, cluster.Spec.HeadGroupSpec.RayStartParams, "6379", &trueFlag, string(utils.RayServiceCRD), "", false)
podTemplateSpec := DefaultHeadPodTemplate(ctx, *cluster, cluster.Spec.HeadGroupSpec, podName, "6379")
pod := BuildPod(ctx, podTemplateSpec, rayv1.HeadNode, cluster.Spec.HeadGroupSpec.RayStartParams, "6379", &trueFlag, string(utils.RayServiceCRD), "")

hasCorrectDeathEnv := false
for _, container := range pod.Spec.Containers {
if container.Name != "ray-head" {
continue
}
if container.Env == nil || len(container.Env) == 0 {
t.Fatalf("Expected death env `%v`", container)
}
for _, env := range container.Env {
if env.Name == utils.RAY_TIMEOUT_MS_TASK_WAIT_FOR_DEATH_INFO {
assert.Equal(t, "0", env.Value)
hasCorrectDeathEnv = true
break
}
}
}
assert.True(t, hasCorrectDeathEnv)
val, ok := pod.Labels[utils.RayClusterServingServiceLabelKey]
assert.True(t, ok, "Expected serve label is not present")
assert.Equal(t, utils.EnableRayClusterServingServiceFalse, val, "Wrong serve label value")
assert.True(t, utils.EnvVarExists(utils.RAY_TIMEOUT_MS_TASK_WAIT_FOR_DEATH_INFO, pod.Spec.Containers[utils.RayContainerIndex].Env))

worker := cluster.Spec.WorkerGroupSpecs[0]
podName = cluster.Name + utils.DashSymbol + string(rayv1.WorkerNode) + utils.DashSymbol + worker.GroupName + utils.DashSymbol + utils.FormatInt32(0)
fqdnRayIP := utils.GenerateFQDNServiceName(ctx, *cluster, cluster.Namespace)
podTemplateSpec = DefaultWorkerPodTemplate(ctx, *cluster, worker, podName, fqdnRayIP, "6379")
pod = BuildPod(ctx, podTemplateSpec, rayv1.WorkerNode, worker.RayStartParams, "6379", nil, string(utils.RayServiceCRD), fqdnRayIP)

val, ok = pod.Labels[utils.RayClusterServingServiceLabelKey]
assert.True(t, ok, "Expected serve label is not present")
assert.Equal(t, utils.EnableRayClusterServingServiceTrue, val, "Wrong serve label value")
assert.True(t, utils.EnvVarExists(utils.RAY_TIMEOUT_MS_TASK_WAIT_FOR_DEATH_INFO, pod.Spec.Containers[utils.RayContainerIndex].Env))
}

func TestBuildPod_WithGcsFtEnabled(t *testing.T) {
@@ -543,7 +535,7 @@ func TestBuildPod_WithGcsFtEnabled(t *testing.T) {
// Build a head Pod.
podName := strings.ToLower(cluster.Name + utils.DashSymbol + string(rayv1.HeadNode) + utils.DashSymbol + utils.FormatInt32(0))
podTemplateSpec := DefaultHeadPodTemplate(ctx, *cluster, cluster.Spec.HeadGroupSpec, podName, "6379")
pod := BuildPod(ctx, podTemplateSpec, rayv1.HeadNode, cluster.Spec.HeadGroupSpec.RayStartParams, "6379", nil, "", "", false)
pod := BuildPod(ctx, podTemplateSpec, rayv1.HeadNode, cluster.Spec.HeadGroupSpec.RayStartParams, "6379", nil, "", "")

// Check environment variable "RAY_GCS_RPC_SERVER_RECONNECT_TIMEOUT_S"
rayContainer := pod.Spec.Containers[utils.RayContainerIndex]
Expand All @@ -561,7 +553,7 @@ func TestBuildPod_WithGcsFtEnabled(t *testing.T) {
cluster.Spec.HeadGroupSpec.Template.Spec.Containers[utils.RayContainerIndex].Env = append(cluster.Spec.HeadGroupSpec.Template.Spec.Containers[utils.RayContainerIndex].Env,
corev1.EnvVar{Name: utils.RAY_GCS_RPC_SERVER_RECONNECT_TIMEOUT_S, Value: "60"})
podTemplateSpec = DefaultHeadPodTemplate(ctx, *cluster, cluster.Spec.HeadGroupSpec, podName, "6379")
pod = BuildPod(ctx, podTemplateSpec, rayv1.HeadNode, cluster.Spec.HeadGroupSpec.RayStartParams, "6379", nil, "", "", false)
pod = BuildPod(ctx, podTemplateSpec, rayv1.HeadNode, cluster.Spec.HeadGroupSpec.RayStartParams, "6379", nil, "", "")
rayContainer = pod.Spec.Containers[utils.RayContainerIndex]

// Check environment variable "RAY_GCS_RPC_SERVER_RECONNECT_TIMEOUT_S"
@@ -578,7 +570,7 @@ func TestBuildPod_WithGcsFtEnabled(t *testing.T) {
podName = cluster.Name + utils.DashSymbol + string(rayv1.WorkerNode) + utils.DashSymbol + worker.GroupName + utils.DashSymbol + utils.FormatInt32(0)
fqdnRayIP := utils.GenerateFQDNServiceName(ctx, *cluster, cluster.Namespace)
podTemplateSpec = DefaultWorkerPodTemplate(ctx, *cluster, worker, podName, fqdnRayIP, "6379")
pod = BuildPod(ctx, podTemplateSpec, rayv1.WorkerNode, worker.RayStartParams, "6379", nil, "", fqdnRayIP, false)
pod = BuildPod(ctx, podTemplateSpec, rayv1.WorkerNode, worker.RayStartParams, "6379", nil, "", fqdnRayIP)

// Check the default value of "RAY_GCS_RPC_SERVER_RECONNECT_TIMEOUT_S"
rayContainer = pod.Spec.Containers[utils.RayContainerIndex]
@@ -595,7 +587,7 @@ func TestBuildPod_WithGcsFtEnabled(t *testing.T) {
corev1.EnvVar{Name: utils.RAY_GCS_RPC_SERVER_RECONNECT_TIMEOUT_S, Value: "120"})
worker = cluster.Spec.WorkerGroupSpecs[0]
podTemplateSpec = DefaultWorkerPodTemplate(ctx, *cluster, worker, podName, fqdnRayIP, "6379")
pod = BuildPod(ctx, podTemplateSpec, rayv1.WorkerNode, worker.RayStartParams, "6379", nil, "", fqdnRayIP, false)
pod = BuildPod(ctx, podTemplateSpec, rayv1.WorkerNode, worker.RayStartParams, "6379", nil, "", fqdnRayIP)

// Check the default value of "RAY_GCS_RPC_SERVER_RECONNECT_TIMEOUT_S"
rayContainer = pod.Spec.Containers[utils.RayContainerIndex]
@@ -666,7 +658,7 @@ func TestBuildPodWithAutoscalerOptions(t *testing.T) {
SecurityContext: &customSecurityContext,
}
podTemplateSpec := DefaultHeadPodTemplate(ctx, *cluster, cluster.Spec.HeadGroupSpec, podName, "6379")
pod := BuildPod(ctx, podTemplateSpec, rayv1.HeadNode, cluster.Spec.HeadGroupSpec.RayStartParams, "6379", &trueFlag, "", "", false)
pod := BuildPod(ctx, podTemplateSpec, rayv1.HeadNode, cluster.Spec.HeadGroupSpec.RayStartParams, "6379", &trueFlag, "", "")
expectedContainer := *autoscalerContainer.DeepCopy()
expectedContainer.Image = customAutoscalerImage
expectedContainer.ImagePullPolicy = customPullPolicy
@@ -840,7 +832,7 @@ func TestCleanupInvalidVolumeMounts(t *testing.T) {
// Test head pod
podName := strings.ToLower(cluster.Name + utils.DashSymbol + string(rayv1.HeadNode) + utils.DashSymbol + utils.FormatInt32(0))
podTemplateSpec := DefaultHeadPodTemplate(ctx, *cluster, cluster.Spec.HeadGroupSpec, podName, "6379")
pod := BuildPod(ctx, podTemplateSpec, rayv1.HeadNode, cluster.Spec.HeadGroupSpec.RayStartParams, "6379", nil, "", "", false)
pod := BuildPod(ctx, podTemplateSpec, rayv1.HeadNode, cluster.Spec.HeadGroupSpec.RayStartParams, "6379", nil, "", "")

pod.Spec.Containers[0].VolumeMounts = append(pod.Spec.Containers[0].VolumeMounts, []corev1.VolumeMount{
{
@@ -1211,16 +1203,18 @@ func TestInitLivenessAndReadinessProbe(t *testing.T) {

rayContainer.LivenessProbe = &httpGetProbe
rayContainer.ReadinessProbe = &httpGetProbe
initLivenessAndReadinessProbe(rayContainer, rayv1.HeadNode, false)
initLivenessAndReadinessProbe(rayContainer, rayv1.HeadNode, "")
assert.NotNil(t, rayContainer.LivenessProbe.HTTPGet)
assert.NotNil(t, rayContainer.ReadinessProbe.HTTPGet)
assert.Nil(t, rayContainer.LivenessProbe.Exec)
assert.Nil(t, rayContainer.ReadinessProbe.Exec)

// Test 2: User does not define a custom probe. KubeRay will inject Exec probe.
// Here we test the case where the Ray Pod originates from RayServiceCRD,
// implying that an additional serve health check will be added to the readiness probe.
rayContainer.LivenessProbe = nil
rayContainer.ReadinessProbe = nil
initLivenessAndReadinessProbe(rayContainer, rayv1.WorkerNode, true)
initLivenessAndReadinessProbe(rayContainer, rayv1.WorkerNode, utils.RayOriginatedFromCRDLabelValue(utils.RayServiceCRD))
assert.NotNil(t, rayContainer.LivenessProbe.Exec)
assert.NotNil(t, rayContainer.ReadinessProbe.Exec)
assert.False(t, strings.Contains(strings.Join(rayContainer.LivenessProbe.Exec.Command, " "), utils.RayServeProxyHealthPath))
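Test 2 above passes utils.RayOriginatedFromCRDLabelValue(utils.RayServiceCRD) as the creator string. That helper is not part of this diff; presumably it just converts the CRD type into its label-value string, so the strings.EqualFold comparison inside initLivenessAndReadinessProbe matches. A sketch of that assumption:

```go
package sketch

// CRDType mirrors the utils.CRDType used in the tests above; the concrete
// values here are assumptions, not code copied from this diff.
type CRDType string

const RayServiceCRD CRDType = "RayService"

// RayOriginatedFromCRDLabelValue is assumed to simply return the CRD type as a
// string, which is also the value BuildPod receives through its creator field.
func RayOriginatedFromCRDLabelValue(crdType CRDType) string {
	return string(crdType)
}
```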
14 changes: 2 additions & 12 deletions ray-operator/controllers/ray/raycluster_controller.go
@@ -1040,19 +1040,14 @@ func (r *RayClusterReconciler) buildHeadPod(ctx context.Context, instance rayv1.
fqdnRayIP := utils.GenerateFQDNServiceName(ctx, instance, instance.Namespace) // Fully Qualified Domain Name
// The Ray head port used by workers to connect to the cluster (GCS server port for Ray >= 1.11.0, Redis port for older Ray.)
headPort := common.GetHeadPort(instance.Spec.HeadGroupSpec.RayStartParams)
// Check whether serve is enabled and add serve label
serveLabel := false
if enableServeServiceValue, exist := instance.Annotations[utils.EnableServeServiceKey]; exist && enableServeServiceValue == utils.EnableServeServiceTrue {
serveLabel = true
}
autoscalingEnabled := instance.Spec.EnableInTreeAutoscaling
podConf := common.DefaultHeadPodTemplate(ctx, instance, instance.Spec.HeadGroupSpec, podName, headPort)
if len(r.headSidecarContainers) > 0 {
podConf.Spec.Containers = append(podConf.Spec.Containers, r.headSidecarContainers...)
}
r.Log.Info("head pod labels", "labels", podConf.Labels)
creatorName := getCreator(instance)
pod := common.BuildPod(ctx, podConf, rayv1.HeadNode, instance.Spec.HeadGroupSpec.RayStartParams, headPort, autoscalingEnabled, creatorName, fqdnRayIP, serveLabel)
pod := common.BuildPod(ctx, podConf, rayv1.HeadNode, instance.Spec.HeadGroupSpec.RayStartParams, headPort, autoscalingEnabled, creatorName, fqdnRayIP)
// Set raycluster instance as the owner and controller
if err := controllerutil.SetControllerReference(&instance, &pod, r.Scheme); err != nil {
r.Log.Error(err, "Failed to set controller reference for raycluster pod")
@@ -1089,12 +1084,7 @@ func (r *RayClusterReconciler) buildWorkerPod(ctx context.Context, instance rayv
podTemplateSpec.Spec.Containers = append(podTemplateSpec.Spec.Containers, r.workerSidecarContainers...)
}
creatorName := getCreator(instance)
// Check whether serve is enabled and add serve label
serveLabel := false
if enableServeServiceValue, exist := instance.Annotations[utils.EnableServeServiceKey]; exist && enableServeServiceValue == utils.EnableServeServiceTrue {
serveLabel = true
}
pod := common.BuildPod(ctx, podTemplateSpec, rayv1.WorkerNode, worker.RayStartParams, headPort, autoscalingEnabled, creatorName, fqdnRayIP, serveLabel)
pod := common.BuildPod(ctx, podTemplateSpec, rayv1.WorkerNode, worker.RayStartParams, headPort, autoscalingEnabled, creatorName, fqdnRayIP)
// Set raycluster instance as the owner and controller
if err := controllerutil.SetControllerReference(&instance, &pod, r.Scheme); err != nil {
r.Log.Error(err, "Failed to set controller reference for raycluster pod")
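Both buildHeadPod and buildWorkerPod now derive the creator from getCreator instead of a separate serve flag. getCreator is outside this diff; the sketch below shows what it plausibly does, assuming the creating CRD is recorded on the RayCluster as a label (the literal label key is an assumption, not taken from this commit).

```go
package sketch

import (
	rayv1 "github.com/ray-project/kuberay/ray-operator/apis/ray/v1"
)

// getCreator (sketch only) reads back which CRD created the RayCluster, e.g.
// "RayService" or "RayJob"; an empty string means a user created it directly.
func getCreator(instance rayv1.RayCluster) string {
	if instance.Labels == nil {
		return ""
	}
	// Hypothetical label key used for illustration.
	return instance.Labels["ray.io/originated-from-crd"]
}
```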
1 change: 0 additions & 1 deletion ray-operator/controllers/ray/rayservice_controller.go
@@ -683,7 +683,6 @@ func (r *RayServiceReconciler) constructRayClusterForRayService(rayService *rayv
for k, v := range rayService.Annotations {
rayClusterAnnotations[k] = v
}
rayClusterAnnotations[utils.EnableServeServiceKey] = utils.EnableServeServiceTrue
errContext := "Failed to serialize RayCluster config. " +
"Manual config updates will NOT be tracked accurately. " +
"Please tear down the cluster and apply a new config."
Expand Down
2 changes: 2 additions & 0 deletions ray-operator/controllers/ray/utils/constant.go
@@ -43,6 +43,8 @@ const (
// Finalizers for GCS fault tolerance
GCSFaultToleranceRedisCleanupFinalizer = "ray.io/gcs-ft-redis-cleanup-finalizer"

// EnableServeServiceKey is exclusively utilized to indicate if a RayCluster is directly used for serving.
// See https://github.com/ray-project/kuberay/pull/1672 for more details.
EnableServeServiceKey = "ray.io/enable-serve-service"
EnableServeServiceTrue = "true"
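Although the RayService controller no longer stamps this annotation onto the clusters it creates (see the rayservice_controller.go change above), the constant's comment says it still marks RayClusters used directly for serving. A minimal sketch of how a controller could consult it, with a hypothetical helper name and mirrored constants:

```go
package sketch

import (
	rayv1 "github.com/ray-project/kuberay/ray-operator/apis/ray/v1"
)

const (
	enableServeServiceKey  = "ray.io/enable-serve-service" // mirrors utils.EnableServeServiceKey
	enableServeServiceTrue = "true"                        // mirrors utils.EnableServeServiceTrue
)

// isDirectlyServing reports whether a standalone RayCluster opted into a serve
// Service via the annotation; the helper name is illustrative, not from KubeRay.
func isDirectlyServing(cluster rayv1.RayCluster) bool {
	v, ok := cluster.Annotations[enableServeServiceKey]
	return ok && v == enableServeServiceTrue
}
```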

