From 2a64ca0502a1134b2d464524c6aad0a72cf557d0 Mon Sep 17 00:00:00 2001 From: ii2day Date: Tue, 8 Aug 2023 15:37:09 +0800 Subject: [PATCH] Adapt scheduler report Signed-off-by: ii2day --- charts/templates/configmap-app-template.yaml | 20 ++- charts/templates/deployment.yaml | 1 + charts/templates/ingress.yaml | 25 ---- cmd/agent/cmd/cert.go | 4 +- cmd/agent/cmd/config.go | 6 - cmd/controller/cmd/config.go | 3 + pkg/grpcManager/server_implement.go | 4 +- pkg/k8ObjManager/deployment.go | 83 +++++++++++ pkg/k8ObjManager/manager.go | 5 + pkg/pluginManager/agentManager.go | 5 +- pkg/pluginManager/controllerManager.go | 37 +++-- pkg/pluginManager/controllerTools.go | 5 +- .../netreach/agentExecuteTask.go | 132 ++++++++++-------- pkg/reportManager/manager.go | 5 +- pkg/reportManager/worker.go | 93 ++++++------ pkg/scheduler/schedule.go | 59 +++++++- pkg/types/controller_config.go | 3 + .../apphttphealth/apphttphealth_suite_test.go | 6 +- test/e2e/apphttphealth/apphttphealth_test.go | 85 +++++++++-- test/e2e/common/tools.go | 8 +- test/e2e/netdns/netdns_suite_test.go | 6 +- test/e2e/netdns/netdns_test.go | 14 +- test/e2e/netreach/netreach_suite_test.go | 8 +- test/e2e/netreach/netreach_test.go | 9 +- test/scripts/debugCluster.sh | 4 +- 25 files changed, 440 insertions(+), 190 deletions(-) delete mode 100644 charts/templates/ingress.yaml create mode 100644 pkg/k8ObjManager/deployment.go diff --git a/charts/templates/configmap-app-template.yaml b/charts/templates/configmap-app-template.yaml index b4c7bece..97b439ed 100644 --- a/charts/templates/configmap-app-template.yaml +++ b/charts/templates/configmap-app-template.yaml @@ -296,7 +296,6 @@ data: targetPort: metrics protocol: TCP {{- end }} - {{- if .Values.feature.enableIPv4 }} - name: http port: {{ .Values.kdoctorAgent.httpServer.appHttpPort }} targetPort: http @@ -305,9 +304,24 @@ data: port: {{ .Values.kdoctorAgent.httpServer.appHttpsPort }} targetPort: https protocol: TCP - {{- end }} ipFamilyPolicy: SingleStack ipFamilies: - IPv4 selector: - {{- include "project.kdoctorAgent.selectorLabels" . | nindent 8 }} \ No newline at end of file + {{- include "project.kdoctorAgent.selectorLabels" . | nindent 8 }} + ingress.yml: | + apiVersion: networking.k8s.io/v1 + kind: Ingress + metadata: + namespace: {{ .Release.Namespace | quote }} + spec: + rules: + - http: + paths: + - path: {{ .Values.kdoctorAgent.ingress.route | quote }} + pathType: Exact + backend: + service: + name: kdoctor + port: + number: {{ .Values.kdoctorAgent.httpServer.appHttpPort }} diff --git a/charts/templates/deployment.yaml b/charts/templates/deployment.yaml index 0da141f5..d0acdae3 100644 --- a/charts/templates/deployment.yaml +++ b/charts/templates/deployment.yaml @@ -77,6 +77,7 @@ spec: - --configmap-daemonset-template=/tmp/configmap-app-template/daemonset.yml - --configmap-pod-template=/tmp/configmap-app-template/pod.yml - --configmap-service-template=/tmp/configmap-app-template/service.yml + - --configmap-ingress-template=/tmp/configmap-app-template/ingress.yml - --tls-ca-cert=/etc/tls/ca.crt - --tls-server-cert=/etc/tls/tls.crt - --tls-server-key=/etc/tls/tls.key diff --git a/charts/templates/ingress.yaml b/charts/templates/ingress.yaml deleted file mode 100644 index 2029ae0d..00000000 --- a/charts/templates/ingress.yaml +++ /dev/null @@ -1,25 +0,0 @@ -{{- if .Values.kdoctorAgent.ingress.enable }} -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: {{ include "project.kdoctorAgent.ingressName" . 
}} - namespace: {{ .Release.Namespace | quote }} -spec: - {{- if .Values.kdoctorAgent.ingress.ingressClass }} - ingressClassName: {{ .Values.kdoctorAgent.ingress.ingressClass | quote }} - {{- end }} - rules: - - http: - paths: - - path: {{ .Values.kdoctorAgent.ingress.route | quote }} - pathType: Exact - backend: - service: - {{- if .Values.feature.enableIPv4 }} - name: {{ include "project.kdoctorAgent.serviceIpv4Name" . }} - {{- else }} - name: {{ include "project.kdoctorAgent.serviceIpv6Name" . }} - {{- end }} - port: - number: {{ .Values.kdoctorAgent.httpServer.appHttpPort }} -{{- end }} \ No newline at end of file diff --git a/cmd/agent/cmd/cert.go b/cmd/agent/cmd/cert.go index 23865d80..6299c177 100644 --- a/cmd/agent/cmd/cert.go +++ b/cmd/agent/cmd/cert.go @@ -68,7 +68,7 @@ func GenServerCert(logger *zap.Logger) { if types.AgentConfig.Configmap.EnableIPv6 { serviceIPv6, err := k8sObjManager.GetK8sObjManager().GetServiceAccessUrl(context.Background(), types.AgentConfig.ServiceV6Name, types.AgentConfig.PodNamespace, servicePortName) if err != nil { - logger.Sugar().Fatalf("failed to get kdoctor ipv4 service %s/%s, reason=%v ", types.AgentConfig.PodNamespace, types.AgentConfig.ServiceV6Name, err) + logger.Sugar().Fatalf("failed to get kdoctor ipv6 service %s/%s, reason=%v ", types.AgentConfig.PodNamespace, types.AgentConfig.ServiceV6Name, err) } // ipv6 ip logger.Sugar().Debugf("get ipv6 serviceAccessurl %v", serviceIPv6) @@ -129,7 +129,7 @@ func checkServiceReady(logger *zap.Logger) { if types.AgentConfig.Configmap.EnableIPv6 { for { - _, err := k8sObjManager.GetK8sObjManager().GetService(ctx, types.AgentConfig.ServiceV4Name, types.AgentConfig.PodNamespace) + _, err := k8sObjManager.GetK8sObjManager().GetService(ctx, types.AgentConfig.ServiceV6Name, types.AgentConfig.PodNamespace) if nil != err { if errors.IsNotFound(err) { logger.Sugar().Errorf("agent runtime IPv6 service %s/%s not exists, wait for controller to create it", types.AgentConfig.PodNamespace, types.AgentConfig.ServiceV6Name) diff --git a/cmd/agent/cmd/config.go b/cmd/agent/cmd/config.go index 8736105a..22ddbcde 100644 --- a/cmd/agent/cmd/config.go +++ b/cmd/agent/cmd/config.go @@ -74,12 +74,6 @@ func init() { globalFlag.StringVar(&types.AgentConfig.TaskName, "task-name", "", "task name") globalFlag.StringVar(&types.AgentConfig.ServiceV4Name, "service-ipv4-name", "", "agent IPv4 service name") globalFlag.StringVar(&types.AgentConfig.ServiceV6Name, "service-ipv6-name", "", "agent IPv6 service name") - if err := rootCmd.MarkPersistentFlagRequired("task-kind"); nil != err { - logger.Sugar().Fatalf("failed to mark persistentFlag 'task-kind' as required, error: %v", err) - } - if err := rootCmd.MarkPersistentFlagRequired("task-name"); nil != err { - logger.Sugar().Fatalf("failed to mark persistentFlag 'task-name' as required, error: %v", err) - } globalFlag.BoolVarP(&types.AgentConfig.AppMode, "app-mode", "A", false, "app mode") globalFlag.BoolVarP(&types.AgentConfig.TlsInsecure, "tls-insecure", "K", true, "skip verify tls") diff --git a/cmd/controller/cmd/config.go b/cmd/controller/cmd/config.go index 8e5e642a..1722ec99 100644 --- a/cmd/controller/cmd/config.go +++ b/cmd/controller/cmd/config.go @@ -78,6 +78,7 @@ func init() { globalFlag.StringVar(&types.ControllerConfig.ConfigMapDaemonsetPath, "configmap-daemonset-template", "", "configmap daemonset template file path") globalFlag.StringVar(&types.ControllerConfig.ConfigMapPodPath, "configmap-pod-template", "", "configmap pod template file path") 
globalFlag.StringVar(&types.ControllerConfig.ConfigMapServicePath, "configmap-service-template", "", "configmap service template file path") + globalFlag.StringVar(&types.ControllerConfig.ConfigMapIngressPath, "configmap-ingress-template", "", "configmap ingress template file path") globalFlag.StringVarP(&types.ControllerConfig.TlsCaCertPath, "tls-ca-cert", "R", "", "ca file path") globalFlag.StringVarP(&types.ControllerConfig.TlsServerCertPath, "tls-server-cert", "T", "", "server cert file path") globalFlag.StringVarP(&types.ControllerConfig.TlsServerKeyPath, "tls-server-key", "Y", "", "server key file path") @@ -90,6 +91,7 @@ func init() { logger.Info("configmap-daemonset-template = " + types.ControllerConfig.ConfigMapDaemonsetPath) logger.Info("configmap-pod-template = " + types.ControllerConfig.ConfigMapPodPath) logger.Info("configmap-service-template = " + types.ControllerConfig.ConfigMapServicePath) + logger.Info("configmap-ingress-template = " + types.ControllerConfig.ConfigMapIngressPath) logger.Info("tls-ca-cert = " + types.ControllerConfig.TlsCaCertPath) logger.Info("tls-server-cert = " + types.ControllerConfig.TlsServerCertPath) logger.Info("tls-server-key = " + types.ControllerConfig.TlsServerKeyPath) @@ -122,6 +124,7 @@ func init() { fn(types.ControllerConfig.ConfigMapDaemonsetPath, types.DaemonsetTempl) fn(types.ControllerConfig.ConfigMapPodPath, types.PodTempl) fn(types.ControllerConfig.ConfigMapServicePath, types.ServiceTempl) + fn(types.ControllerConfig.ConfigMapIngressPath, types.IngressTempl) } cobra.OnInitialize(printFlag) diff --git a/pkg/grpcManager/server_implement.go b/pkg/grpcManager/server_implement.go index 5ee8558a..d0de4e9f 100644 --- a/pkg/grpcManager/server_implement.go +++ b/pkg/grpcManager/server_implement.go @@ -7,13 +7,12 @@ import ( "context" "fmt" "github.com/kdoctor-io/kdoctor/api/v1/agentGrpc" + "github.com/kdoctor-io/kdoctor/pkg/utils" "go.uber.org/zap" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" "io" "time" - - "github.com/kdoctor-io/kdoctor/pkg/utils" ) // ------ implement @@ -31,6 +30,7 @@ func (s *myGrpcServer) ExecRemoteCmd(stream agentGrpc.CmdService_ExecRemoteCmdSe logger.Error("grpc server ExecRemoteCmd: got empty command \n") return nil, status.Error(codes.InvalidArgument, "request command is empty") } + if r.Timeoutsecond == 0 { logger.Error("grpc server ExecRemoteCmd: got empty timeout \n") return nil, status.Error(codes.InvalidArgument, "request command is empty") diff --git a/pkg/k8ObjManager/deployment.go b/pkg/k8ObjManager/deployment.go new file mode 100644 index 00000000..f6141437 --- /dev/null +++ b/pkg/k8ObjManager/deployment.go @@ -0,0 +1,83 @@ +// Copyright 2023 Authors of kdoctor-io +// SPDX-License-Identifier: Apache-2.0 + +package k8sObjManager + +import ( + "context" + "fmt" + "github.com/kdoctor-io/kdoctor/pkg/utils" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (nm *k8sObjManager) GetDeployment(ctx context.Context, name, namespace string) (*appsv1.Deployment, error) { + d := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + } + key := client.ObjectKeyFromObject(d) + if e := nm.client.Get(ctx, key, d); e != nil { + return nil, fmt.Errorf("failed to get deployment %v/%v, reason=%v", namespace, name, e) + } + return d, nil +} + +func (nm *k8sObjManager) ListDeploymentPod(ctx context.Context, deploymentName, 
deploymentNameSpace string) ([]corev1.Pod, error) {
+
+	dae, e := nm.GetDeployment(ctx, deploymentName, deploymentNameSpace)
+	if e != nil {
+		return nil, fmt.Errorf("failed to get deployment, error=%v", e)
+	}
+
+	podLabels := dae.Spec.Template.Labels
+	opts := []client.ListOption{
+		client.MatchingLabelsSelector{
+			Selector: labels.SelectorFromSet(podLabels),
+		},
+	}
+	return nm.GetPodList(ctx, opts...)
+}
+
+func (nm *k8sObjManager) ListDeploymentPodIPs(ctx context.Context, deploymentName, deploymentNameSpace string) (PodIps, error) {
+
+	podList, e := nm.ListDeploymentPod(ctx, deploymentName, deploymentNameSpace)
+	if e != nil {
+		return nil, e
+	}
+	if len(podList) == 0 {
+		return nil, fmt.Errorf("failed to get any pods")
+	}
+
+	result := PodIps{}
+
+	for _, v := range podList {
+		t := IPs{}
+		t.InterfaceName = "eth0"
+		for _, m := range v.Status.PodIPs {
+			if utils.CheckIPv4Format(m.IP) {
+				t.IPv4 = m.IP
+			} else if utils.CheckIPv6Format(m.IP) {
+				t.IPv6 = m.IP
+			}
+		}
+		result[v.Name] = []IPs{t}
+	}
+	return result, nil
+}
+
+func (nm *k8sObjManager) ListDeployPodMultusIPs(ctx context.Context, deploymentName, deploymentNameSpace string) (PodIps, error) {
+	podlist, e := nm.ListDeploymentPod(ctx, deploymentName, deploymentNameSpace)
+	if e != nil {
+		return nil, e
+	}
+	if len(podlist) == 0 {
+		return nil, fmt.Errorf("failed to get any pods")
+	}
+	return parseMultusIP(podlist)
+}
diff --git a/pkg/k8ObjManager/manager.go b/pkg/k8ObjManager/manager.go
index 7e134990..31f4ccbd 100644
--- a/pkg/k8ObjManager/manager.go
+++ b/pkg/k8ObjManager/manager.go
@@ -28,6 +28,11 @@ type K8sObjManager interface {
 	ListDaemonsetPodIPs(ctx context.Context, daemonsetName, daemonsetNameSpace string) (PodIps, error)
 	ListDaemonsetPodMultusIPs(ctx context.Context, daemonsetName, daemonsetNameSpace string) (PodIps, error)
 
+	// deployment
+	GetDeployment(ctx context.Context, name, namespace string) (*appsv1.Deployment, error)
+	ListDeploymentPodIPs(ctx context.Context, deploymentName, deploymentNameSpace string) (PodIps, error)
+	ListDeployPodMultusIPs(ctx context.Context, deploymentName, deploymentNameSpace string) (PodIps, error)
+
 	// pod
 	GetPodList(ctx context.Context, opts ...client.ListOption) ([]corev1.Pod, error)
 	ListSelectedPodMultusIPs(ctx context.Context, labelSelector *metav1.LabelSelector) (PodIps, error)
diff --git a/pkg/pluginManager/agentManager.go b/pkg/pluginManager/agentManager.go
index 0fadb621..577a6d58 100644
--- a/pkg/pluginManager/agentManager.go
+++ b/pkg/pluginManager/agentManager.go
@@ -6,6 +6,7 @@ package pluginManager
 import (
 	"context"
 	"fmt"
+	networkingv1 "k8s.io/api/networking/v1"
 	"time"
 
 	appsv1 "k8s.io/api/apps/v1"
@@ -36,7 +37,9 @@ func (s *pluginManager) RunAgentController() {
 	if e := crd.AddToScheme(scheme); e != nil {
 		logger.Sugar().Fatalf("failed to add scheme for plugins, reason=%v", e)
 	}
-
+	if e := networkingv1.AddToScheme(scheme); e != nil {
+		logger.Sugar().Fatalf("failed to add scheme for plugins, reason=%v", e)
+	}
 	var fm fileManager.FileManager
 	var e error
 	if types.AgentConfig.EnableAggregateAgentReport {
diff --git a/pkg/pluginManager/controllerManager.go b/pkg/pluginManager/controllerManager.go
index 002a508c..3e406937 100644
--- a/pkg/pluginManager/controllerManager.go
+++ b/pkg/pluginManager/controllerManager.go
@@ -10,6 +10,7 @@ import (
 
 	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
+	networkingv1 "k8s.io/api/networking/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
 	ctrl "sigs.k8s.io/controller-runtime"
@@ 
-34,7 +35,9 @@ func (s *pluginManager) RunControllerController(healthPort int, webhookPort int, if e := crd.AddToScheme(scheme); e != nil { logger.Sugar().Fatalf("failed to add scheme for plugins, reason=%v", e) } - + if e := networkingv1.AddToScheme(scheme); e != nil { + logger.Sugar().Fatalf("failed to add scheme for plugins, reason=%v", e) + } n := ctrl.Options{ Scheme: scheme, MetricsBindAddress: "0", @@ -89,8 +92,7 @@ func (s *pluginManager) RunControllerController(healthPort int, webhookPort int, var e error // TODO: runControllerAggregateReportOnce need agents' IPs - //if types.ControllerConfig.EnableAggregateAgentReport { - if false { + if types.ControllerConfig.EnableAggregateAgentReport { // fileManager takes charge of writing and removing local report gcInterval := time.Duration(types.ControllerConfig.CleanAgedReportInMinute) * time.Minute logger.Sugar().Infof("save report to %v, clean interval %v", types.ControllerConfig.DirPathControllerReport, gcInterval.String()) @@ -98,13 +100,9 @@ func (s *pluginManager) RunControllerController(healthPort int, webhookPort int, if e != nil { logger.Sugar().Fatalf("failed to new fileManager , reason=%v", e) } - - // reportManager takes charge of sync reports from remote agents - interval := time.Duration(types.ControllerConfig.CollectAgentReportIntervalInSecond) * time.Second - logger.Sugar().Infof("run report Sync manager, save to %v, collectInterval %v ", types.ControllerConfig.DirPathControllerReport, interval) - reportManager.InitReportManager(logger.Named("reportSyncManager"), types.ControllerConfig.DirPathControllerReport, interval) } + runtimeDB := make([]scheduler.DB, 0, len(s.chainingPlugins)) ctx, cancelFunc := context.WithCancel(context.TODO()) for name, plugin := range s.chainingPlugins { // setup reconcile @@ -115,6 +113,14 @@ func (s *pluginManager) RunControllerController(healthPort int, webhookPort int, logger.Sugar().Debugf("there's no uniqueMatchLabelKey in the configmap, try to use the default '%s'", scheduler.UniqueMatchLabelKey) uniqueMatchLabelKey = scheduler.UniqueMatchLabelKey } + tracker := scheduler.NewTracker(mgr.GetClient(), mgr.GetAPIReader(), scheduler.TrackerConfig{ + ItemChannelBuffer: int(types.ControllerConfig.ResourceTrackerChannelBuffer), + MaxDatabaseCap: int(types.ControllerConfig.ResourceTrackerMaxDatabaseCap), + ExecutorWorkers: int(types.ControllerConfig.ResourceTrackerExecutorWorkers), + SignalTimeOutDuration: time.Duration(types.ControllerConfig.ResourceTrackerSignalTimeoutSeconds) * time.Second, + TraceGapDuration: time.Duration(types.ControllerConfig.ResourceTrackerTraceGapSeconds) * time.Second, + }, logger.Named(name+"Tracker")) + runtimeDB = append(runtimeDB, tracker.DB) k := &pluginControllerReconciler{ logger: logger.Named(name + "Reconciler"), plugin: plugin, @@ -124,13 +130,7 @@ func (s *pluginManager) RunControllerController(healthPort int, webhookPort int, fm: fm, crdKindName: name, runtimeUniqueMatchLabelKey: uniqueMatchLabelKey, - tracker: scheduler.NewTracker(mgr.GetClient(), mgr.GetAPIReader(), scheduler.TrackerConfig{ - ItemChannelBuffer: int(types.ControllerConfig.ResourceTrackerChannelBuffer), - MaxDatabaseCap: int(types.ControllerConfig.ResourceTrackerMaxDatabaseCap), - ExecutorWorkers: int(types.ControllerConfig.ResourceTrackerExecutorWorkers), - SignalTimeOutDuration: time.Duration(types.ControllerConfig.ResourceTrackerSignalTimeoutSeconds) * time.Second, - TraceGapDuration: time.Duration(types.ControllerConfig.ResourceTrackerTraceGapSeconds) * time.Second, - }, 
logger.Named(name+"Tracker")), + tracker: tracker, } k.tracker.Start(ctx) if e := k.SetupWithManager(mgr); e != nil { @@ -148,6 +148,13 @@ func (s *pluginManager) RunControllerController(healthPort int, webhookPort int, } } + if types.ControllerConfig.EnableAggregateAgentReport { + // reportManager takes charge of sync reports from remote agents + interval := time.Duration(types.ControllerConfig.CollectAgentReportIntervalInSecond) * time.Second + logger.Sugar().Infof("run report Sync manager, save to %v, collectInterval %v ", types.ControllerConfig.DirPathControllerReport, interval) + reportManager.InitReportManager(logger.Named("reportSyncManager"), types.ControllerConfig.DirPathControllerReport, interval, runtimeDB) + } + go func() { msg := "reconcile of plugin down" if e := mgr.Start(ctrl.SetupSignalHandler()); e != nil { diff --git a/pkg/pluginManager/controllerTools.go b/pkg/pluginManager/controllerTools.go index 3fe675db..e42fc063 100644 --- a/pkg/pluginManager/controllerTools.go +++ b/pkg/pluginManager/controllerTools.go @@ -26,7 +26,8 @@ import ( ) func (s *pluginControllerReconciler) GetSpiderAgentNodeNotInRecord(ctx context.Context, succeedNodeList []string, podMatchLabel client.MatchingLabels) ([]string, error) { - var allNodeList, failNodeList []string + allNodeList, failNodeList := []string{}, []string{} + podList := corev1.PodList{} err := s.client.List(ctx, &podList, @@ -50,7 +51,7 @@ func (s *pluginControllerReconciler) GetSpiderAgentNodeNotInRecord(ctx context.C s.logger.Sugar().Debugf("all agent nodes: %v", allNodeList) // gather the failure Node list - slices.Filter(failNodeList, allNodeList, func(s string) bool { + failNodeList = slices.Filter(failNodeList, allNodeList, func(s string) bool { return !slices.Contains(succeedNodeList, s) }) diff --git a/pkg/pluginManager/netreach/agentExecuteTask.go b/pkg/pluginManager/netreach/agentExecuteTask.go index ddde1b57..92967068 100644 --- a/pkg/pluginManager/netreach/agentExecuteTask.go +++ b/pkg/pluginManager/netreach/agentExecuteTask.go @@ -9,7 +9,6 @@ import ( "sync" "go.uber.org/zap" - networkingv1 "k8s.io/api/networking/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/utils/pointer" @@ -20,6 +19,7 @@ import ( "github.com/kdoctor-io/kdoctor/pkg/lock" "github.com/kdoctor-io/kdoctor/pkg/pluginManager/types" config "github.com/kdoctor-io/kdoctor/pkg/types" + runtimetype "github.com/kdoctor-io/kdoctor/pkg/types" ) func ParseSuccessCondition(successCondition *crd.NetSuccessCondition, metricResult *v1beta1.HttpMetrics) (failureReason string, err error) { @@ -93,35 +93,23 @@ func (s *PluginNetReach) AgentExecuteTask(logger *zap.Logger, ctx context.Contex target := instance.Spec.Target request := instance.Spec.Request successCondition := instance.Spec.SuccessCondition + runtimeResource := instance.Status.Resource testTargetList := []*TestTarget{} // test kdoctor agent logger.Sugar().Infof("load test kdoctor Agent pod: qps=%v, PerRequestTimeout=%vs, Duration=%vs", request.QPS, request.PerRequestTimeoutInMS, request.DurationInSecond) finalfailureReason = "" - // ----------------------- test pod ip if target.Endpoint { - var PodIps k8sObjManager.PodIps - - if target.MultusInterface { - PodIps, e = k8sObjManager.GetK8sObjManager().ListDaemonsetPodMultusIPs(ctx, config.AgentConfig.Configmap.AgentDaemonsetName, config.AgentConfig.PodNamespace) - logger.Sugar().Debugf("test agent multus pod ip: %v", PodIps) + podIPs, e := getTargetPodIP(ctx, runtimeResource.RuntimeName, runtimeResource.RuntimeType, target.MultusInterface) + if e != nil { + 
logger.Sugar().Debugf("test agent pod ip: %v", podIPs) if e != nil { logger.Sugar().Errorf("failed to ListDaemonsetPodMultusIPs, error=%v", e) finalfailureReason = fmt.Sprintf("failed to ListDaemonsetPodMultusIPs, error=%v", e) } - } else { - PodIps, e = k8sObjManager.GetK8sObjManager().ListDaemonsetPodIPs(ctx, config.AgentConfig.Configmap.AgentDaemonsetName, config.AgentConfig.PodNamespace) - logger.Sugar().Debugf("test agent single pod ip: %v", PodIps) - if e != nil { - logger.Sugar().Errorf("failed to ListDaemonsetPodIPs, error=%v", e) - finalfailureReason = fmt.Sprintf("failed to ListDaemonsetPodIPs, error=%v", e) - } - } - - if len(PodIps) > 0 { - for podname, ips := range PodIps { + for podname, ips := range podIPs { for _, podips := range ips { if len(podips.IPv4) > 0 && (target.IPv4 == nil || (target.IPv4 != nil && *target.IPv4)) { testTargetList = append(testTargetList, &TestTarget{ @@ -133,50 +121,33 @@ func (s *PluginNetReach) AgentExecuteTask(logger *zap.Logger, ctx context.Contex if len(podips.IPv6) > 0 && (target.IPv6 == nil || (target.IPv6 != nil && *target.IPv6)) { testTargetList = append(testTargetList, &TestTarget{ Name: "AgentPodV6IP_" + podname + "_" + podips.IPv6, - Url: fmt.Sprintf("http://%s:%d", podips.IPv6, config.AgentConfig.AppHttpPort), + Url: fmt.Sprintf("http://[%s]:%d", podips.IPv6, config.AgentConfig.AppHttpPort), Method: loadHttp.HttpMethodGet, }) } } } - } else { - logger.Sugar().Debugf("ignore test agent pod ip") - } - // ----------------------- get service - var agentV4Url, agentV6Url *k8sObjManager.ServiceAccessUrl - serviceAccessPortName := "http" - if config.AgentConfig.Configmap.EnableIPv4 { - agentV4Url, e = k8sObjManager.GetK8sObjManager().GetServiceAccessUrl(ctx, config.AgentConfig.ServiceV4Name, config.AgentConfig.PodNamespace, serviceAccessPortName) - if e != nil { - logger.Sugar().Errorf("failed to get agent ipv4 service url , error=%v", e) - } - } - if config.AgentConfig.Configmap.EnableIPv6 { - agentV6Url, e = k8sObjManager.GetK8sObjManager().GetServiceAccessUrl(ctx, config.AgentConfig.ServiceV6Name, config.AgentConfig.PodNamespace, serviceAccessPortName) - if e != nil { - logger.Sugar().Errorf("failed to get agent ipv6 service url , error=%v", e) - } } + } - var localNodeIpv4, localNodeIpv6 string - if true { - localNodeIpv4, localNodeIpv6, e = k8sObjManager.GetK8sObjManager().GetNodeIP(ctx, config.AgentConfig.LocalNodeName) - if e != nil { - logger.Sugar().Errorf("failed to get local node %v ip, error=%v", config.AgentConfig.LocalNodeName, e) - } else { - logger.Sugar().Debugf("local node %v ip: ipv4=%v, ipv6=%v", config.AgentConfig.LocalNodeName, localNodeIpv4, localNodeIpv6) - } + // get service + var agentV4Url, agentV6Url *k8sObjManager.ServiceAccessUrl + serviceAccessPortName := "http" + if config.AgentConfig.Configmap.EnableIPv4 { + agentV4Url, e = k8sObjManager.GetK8sObjManager().GetServiceAccessUrl(ctx, config.AgentConfig.ServiceV4Name, config.AgentConfig.PodNamespace, serviceAccessPortName) + if e != nil { + logger.Sugar().Errorf("failed to get agent ipv4 service url , error=%v", e) } - - // ----------------------- get ingress - var agentIngress *networkingv1.Ingress - agentIngress, e = k8sObjManager.GetK8sObjManager().GetIngress(ctx, config.AgentConfig.Configmap.AgentIngressName, config.AgentConfig.PodNamespace) + } + if config.AgentConfig.Configmap.EnableIPv6 { + agentV6Url, e = k8sObjManager.GetK8sObjManager().GetServiceAccessUrl(ctx, config.AgentConfig.ServiceV6Name, config.AgentConfig.PodNamespace, serviceAccessPortName) if e 
!= nil { - logger.Sugar().Errorf("failed to get ingress , error=%v", e) + logger.Sugar().Errorf("failed to get agent ipv6 service url , error=%v", e) } - + } + if target.ClusterIP { // ----------------------- test clusterIP ipv4 - if target.ClusterIP && target.IPv4 != nil && *(target.IPv4) { + if target.IPv4 != nil && *(target.IPv4) { if agentV4Url != nil && len(agentV4Url.ClusterIPUrl) > 0 { testTargetList = append(testTargetList, &TestTarget{ Name: "AgentClusterV4IP_" + agentV4Url.ClusterIPUrl[0], @@ -204,9 +175,19 @@ func (s *PluginNetReach) AgentExecuteTask(logger *zap.Logger, ctx context.Contex } else { logger.Sugar().Debugf("ignore test agent cluster ipv6 ip") } + } + + if target.NodePort { + // get node ip + localNodeIpv4, localNodeIpv6, e := k8sObjManager.GetK8sObjManager().GetNodeIP(ctx, config.AgentConfig.LocalNodeName) + if e != nil { + logger.Sugar().Errorf("failed to get local node %v ip, error=%v", config.AgentConfig.LocalNodeName, e) + } else { + logger.Sugar().Debugf("local node %v ip: ipv4=%v, ipv6=%v", config.AgentConfig.LocalNodeName, localNodeIpv4, localNodeIpv6) + } // ----------------------- test node port - if target.NodePort && target.IPv4 != nil && *(target.IPv4) { + if target.IPv4 != nil && *(target.IPv4) { if agentV4Url != nil && agentV4Url.NodePort != 0 && len(localNodeIpv4) != 0 { testTargetList = append(testTargetList, &TestTarget{ Name: "AgentNodePortV4IP_" + localNodeIpv4 + "_" + fmt.Sprintf("%v", agentV4Url.NodePort), @@ -220,11 +201,11 @@ func (s *PluginNetReach) AgentExecuteTask(logger *zap.Logger, ctx context.Contex logger.Sugar().Debugf("ignore test agent nodePort ipv4") } - if target.NodePort && target.IPv6 != nil && *(target.IPv6) { + if target.IPv6 != nil && *(target.IPv6) { if agentV6Url != nil && agentV6Url.NodePort != 0 && len(localNodeIpv6) != 0 { testTargetList = append(testTargetList, &TestTarget{ Name: "AgentNodePortV6IP_" + localNodeIpv6 + "_" + fmt.Sprintf("%v", agentV6Url.NodePort), - Url: fmt.Sprintf("http://%s:%d", localNodeIpv6, agentV6Url.NodePort), + Url: fmt.Sprintf("http://[%s]:%d", localNodeIpv6, agentV6Url.NodePort), Method: loadHttp.HttpMethodGet, }) } else { @@ -233,9 +214,10 @@ func (s *PluginNetReach) AgentExecuteTask(logger *zap.Logger, ctx context.Contex } else { logger.Sugar().Debugf("ignore test agent nodePort ipv6") } + } - // ----------------------- test loadbalancer IP - if target.LoadBalancer && target.IPv4 != nil && *(target.IPv4) { + if target.LoadBalancer { + if target.IPv4 != nil && *(target.IPv4) { if agentV4Url != nil && len(agentV4Url.LoadBalancerUrl) > 0 { testTargetList = append(testTargetList, &TestTarget{ Name: "AgentLoadbalancerV4IP_" + agentV4Url.LoadBalancerUrl[0], @@ -249,7 +231,7 @@ func (s *PluginNetReach) AgentExecuteTask(logger *zap.Logger, ctx context.Contex logger.Sugar().Debugf("ignore test agent loadbalancer ipv4") } - if target.LoadBalancer && target.IPv6 != nil && *(target.IPv6) { + if target.IPv6 != nil && *(target.IPv6) { if agentV6Url != nil && len(agentV6Url.LoadBalancerUrl) > 0 { testTargetList = append(testTargetList, &TestTarget{ Name: "AgentLoadbalancerV6IP_" + agentV6Url.LoadBalancerUrl[0], @@ -262,9 +244,14 @@ func (s *PluginNetReach) AgentExecuteTask(logger *zap.Logger, ctx context.Contex } else { logger.Sugar().Debugf("ignore test agent loadbalancer ipv6") } + } - // ----------------------- test ingress - if target.Ingress { + if target.Ingress { + if runtimeResource.ServiceNameV4 != nil { + agentIngress, e := k8sObjManager.GetK8sObjManager().GetIngress(ctx, 
*runtimeResource.ServiceNameV4, config.AgentConfig.PodNamespace) + if e != nil { + logger.Sugar().Errorf("failed to get v4 ingress , error=%v", e) + } if agentIngress != nil && len(agentIngress.Status.LoadBalancer.Ingress) > 0 { http := "http" if len(agentIngress.Spec.TLS) > 0 { @@ -272,12 +259,12 @@ func (s *PluginNetReach) AgentExecuteTask(logger *zap.Logger, ctx context.Contex } url := fmt.Sprintf("%s://%s%s", http, agentIngress.Status.LoadBalancer.Ingress[0].IP, agentIngress.Spec.Rules[0].HTTP.Paths[0].Path) testTargetList = append(testTargetList, &TestTarget{ - Name: "AgentIngress_" + url, + Name: "AgentIngress_v4_" + url, Url: url, Method: loadHttp.HttpMethodGet, }) } else { - finalfailureReason = "failed to get agent ingress address" + finalfailureReason = "failed to get agent v4 ingress address" } } else { logger.Sugar().Debugf("ignore test agent ingress ipv6") @@ -347,3 +334,26 @@ func (s *PluginNetReach) SetReportWithTask(report *v1beta1.Report, crdSpec inter report.NetReachTask = NetReachTask return nil } + +func getTargetPodIP(ctx context.Context, runtimeName, runtimeKind string, multus bool) (k8sObjManager.PodIps, error) { + var podIPs k8sObjManager.PodIps + var err error + switch runtimeKind { + case runtimetype.KindDaemonSet: + if multus { + podIPs, err = k8sObjManager.GetK8sObjManager().ListDaemonsetPodMultusIPs(ctx, runtimeName, runtimetype.AgentConfig.PodNamespace) + } else { + podIPs, err = k8sObjManager.GetK8sObjManager().ListDaemonsetPodIPs(ctx, runtimeName, runtimetype.AgentConfig.PodNamespace) + } + case runtimetype.KindDeployment: + if multus { + podIPs, err = k8sObjManager.GetK8sObjManager().ListDeployPodMultusIPs(ctx, runtimeName, runtimetype.AgentConfig.PodNamespace) + } else { + podIPs, err = k8sObjManager.GetK8sObjManager().ListDeploymentPodIPs(ctx, runtimeName, runtimetype.AgentConfig.PodNamespace) + } + default: + return podIPs, fmt.Errorf("runtime kind %s not support ", runtimeKind) + } + + return podIPs, err +} diff --git a/pkg/reportManager/manager.go b/pkg/reportManager/manager.go index 746ca5f7..94bdefbf 100644 --- a/pkg/reportManager/manager.go +++ b/pkg/reportManager/manager.go @@ -5,6 +5,7 @@ package reportManager import ( "context" + "github.com/kdoctor-io/kdoctor/pkg/scheduler" "go.uber.org/zap" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/util/workqueue" @@ -20,11 +21,12 @@ type reportManager struct { reportDir string collectInterval time.Duration queue workqueue.RateLimitingInterface + runtimeDB []scheduler.DB } var globalReportManager *reportManager -func InitReportManager(logger *zap.Logger, reportDir string, collectInterval time.Duration) { +func InitReportManager(logger *zap.Logger, reportDir string, collectInterval time.Duration, db []scheduler.DB) { if globalReportManager != nil { return } @@ -34,6 +36,7 @@ func InitReportManager(logger *zap.Logger, reportDir string, collectInterval tim reportDir: reportDir, collectInterval: collectInterval, queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "reportManager"), + runtimeDB: db, } go globalReportManager.runWorker() } diff --git a/pkg/reportManager/worker.go b/pkg/reportManager/worker.go index afc1f7a9..b3e8e78f 100644 --- a/pkg/reportManager/worker.go +++ b/pkg/reportManager/worker.go @@ -8,6 +8,7 @@ import ( "fmt" "github.com/kdoctor-io/kdoctor/pkg/grpcManager" k8sObjManager "github.com/kdoctor-io/kdoctor/pkg/k8ObjManager" + crd "github.com/kdoctor-io/kdoctor/pkg/k8s/apis/kdoctor.io/v1beta1" "github.com/kdoctor-io/kdoctor/pkg/types" 
"github.com/kdoctor-io/kdoctor/pkg/utils" "go.uber.org/zap" @@ -65,6 +66,11 @@ func (s *reportManager) syncReportFromOneAgent(ctx context.Context, logger *zap. // -- v := strings.Split(remoteFileName, "_") timeSuffix := v[len(v)-1] + taskName := v[1] + if !strings.Contains(podName, taskName) { + logger.Sugar().Debugf("task %s not task of pod %s ,skip sync report %s", taskName, podName, remoteFileName) + continue + } remoteFilePre := strings.TrimSuffix(remoteFileName, "_"+timeSuffix) // file name format: fmt.Sprintf("%s_%s_round%d_%s_%s", kindName, taskName, roundNumber, nodeName, suffix) t := time.Duration(types.ControllerConfig.ReportAgeInDay*24) * time.Hour @@ -93,48 +99,57 @@ func (s *reportManager) runControllerAggregateReportOnce(ctx context.Context, lo return nil } logger.Sugar().Debugf("before sync, local report files: %v", localFileList) + // get all runtime obj + for _, v := range s.runtimeDB { - // get all agent ip - allPodIp, e := k8sObjManager.GetK8sObjManager().ListDaemonsetPodIPs(context.Background(), types.ControllerConfig.Configmap.AgentDaemonsetName, types.ControllerConfig.PodNamespace) - if e != nil { - m := fmt.Sprintf("failed to get agent ip, error=%v", e) - logger.Error(m) - // retry - return fmt.Errorf(m) - } - if len(allPodIp) == 0 { - m := "get empty agent ip" - logger.Error(m) - // retry - return fmt.Errorf(m) - } - - for podName, podIpInfo := range allPodIp { - // get pod ip - if len(podIpInfo) == 0 { - logger.Sugar().Errorf("failed to get agent %s ip ", podName) - continue - } - var podip string - if types.ControllerConfig.Configmap.EnableIPv4 { - podip = podIpInfo[0].IPv4 - } else { - podip = podIpInfo[0].IPv6 - } - if len(podip) == 0 { - logger.Sugar().Errorf("failed to get agent %s ip ", podName) - continue - } + for _, m := range v.List() { + // only aggregate created runtime report + if m.RuntimeStatus != crd.RuntimeCreated { + continue + } + var podIP k8sObjManager.PodIps + var err error + if m.RuntimeKind == types.KindDaemonSet { + podIP, err = k8sObjManager.GetK8sObjManager().ListDaemonsetPodIPs(context.Background(), m.RuntimeName, types.ControllerConfig.PodNamespace) + } + if m.RuntimeKind == types.KindDeployment { + podIP, err = k8sObjManager.GetK8sObjManager().ListDeploymentPodIPs(context.Background(), m.RuntimeName, types.ControllerConfig.PodNamespace) + } + logger.Sugar().Debugf("podIP : %v", podIP) + if err != nil { + m := fmt.Sprintf("failed to get kind %s name %s agent ip, error=%v", m.RuntimeKind, m.RuntimeName, err) + logger.Error(m) + // retry + return fmt.Errorf(m) + } - ip := net.ParseIP(podip) - var address string - if ip.To4() == nil { - address = fmt.Sprintf("[%s]:%d", podip, types.ControllerConfig.AgentGrpcListenPort) - } else { - address = fmt.Sprintf("%s:%d", podip, types.ControllerConfig.AgentGrpcListenPort) + for podName, podIpInfo := range podIP { + // get pod ip + if len(podIpInfo) == 0 { + logger.Sugar().Errorf("failed to get agent %s ip ", podName) + continue + } + var podip string + if types.ControllerConfig.Configmap.EnableIPv4 { + podip = podIpInfo[0].IPv4 + } else { + podip = podIpInfo[0].IPv6 + } + if len(podip) == 0 { + logger.Sugar().Errorf("failed to get agent %s ip ", podName) + continue + } + + ip := net.ParseIP(podip) + var address string + if ip.To4() == nil { + address = fmt.Sprintf("[%s]:%d", podip, types.ControllerConfig.AgentGrpcListenPort) + } else { + address = fmt.Sprintf("%s:%d", podip, types.ControllerConfig.AgentGrpcListenPort) + } + s.syncReportFromOneAgent(ctx, logger, grpcClient, localFileList, podName, 
address)
+			}
 		}
-		s.syncReportFromOneAgent(ctx, logger, grpcClient, localFileList, podName, address)
-	}
 
 	return nil
diff --git a/pkg/scheduler/schedule.go b/pkg/scheduler/schedule.go
index 08317872..935e1eb3 100644
--- a/pkg/scheduler/schedule.go
+++ b/pkg/scheduler/schedule.go
@@ -6,10 +6,10 @@ package scheduler
 import (
 	"context"
 	"fmt"
-
 	"go.uber.org/zap"
 	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
+	networkingv1 "k8s.io/api/networking/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/utils/pointer"
@@ -131,6 +131,16 @@ func (s *Scheduler) CreateTaskRuntimeIfNotExist(ctx context.Context, ownerTask m
 			return v1beta1.TaskResource{}, fmt.Errorf("failed to create runtime IPv4 service for task '%s/%s', error: %w", s.taskKind, s.taskName, err)
 		}
 		resource.ServiceNameV4 = pointer.String(serviceNameV4)
+
+		if s.taskKind == types.KindNameNetReach {
+			nr := ownerTask.(*v1beta1.NetReach)
+			if nr.Spec.Target.Ingress {
+				err = s.createIngress(ctx, serviceNameV4, runtime)
+				if nil != err {
+					return v1beta1.TaskResource{}, fmt.Errorf("failed to create runtime IPv4 ingress for task '%s/%s', error: %w", s.taskKind, s.taskName, err)
+				}
+			}
+		}
 	}
 	if types.ControllerConfig.Configmap.EnableIPv6 {
 		serviceNameV6, err := s.createService(ctx, taskRuntimeName, agentSpec, runtime, corev1.IPv6Protocol)
@@ -138,6 +148,7 @@
 			return v1beta1.TaskResource{}, fmt.Errorf("failed to create runtime IPv6 service for task '%s/%s', error: %w", s.taskKind, s.taskName, err)
 		}
 		resource.ServiceNameV6 = pointer.String(serviceNameV6)
+
 	}
 
 	return resource, nil
@@ -187,6 +198,45 @@ func (s *Scheduler) createService(ctx context.Context, taskRuntimeName string, a
 	return svcName, nil
 }
 
+func (s *Scheduler) createIngress(ctx context.Context, serviceName string, ownerRuntime client.Object) error {
+	needCreate := false
+	var ingressName = serviceName
+	var ingress networkingv1.Ingress
+	objectKey := client.ObjectKey{
+		// reuse kdoctor-controller namespace
+		Namespace: types.ControllerConfig.PodNamespace,
+		Name:      ingressName,
+	}
+	s.log.Sugar().Debugf("try to get task '%s/%s' corresponding runtime ingress '%s'", s.taskKind, s.taskName, ingressName)
+	err := s.apiReader.Get(ctx, objectKey, &ingress)
+	if nil != err {
+		if errors.IsNotFound(err) {
+			s.log.Sugar().Debugf("task '%s/%s' corresponding runtime ingress '%s' not found, try to create one", s.taskKind, s.taskName, ingressName)
+			needCreate = true
+		} else {
+			return err
+		}
+	}
+
+	if needCreate {
+		ingressTmp := s.generateIngress(ingressName)
+		ingressTmp.SetNamespace(types.ControllerConfig.PodNamespace)
+
+		err := controllerruntime.SetControllerReference(ownerRuntime, ingressTmp, s.client.Scheme())
+		if nil != err {
+			return fmt.Errorf("failed to set ingress %s/%s controllerReference with owner runtime %s, error: %v",
+				types.ControllerConfig.PodNamespace, ingressName, ownerRuntime.GetName(), err)
+		}
+
+		s.log.Sugar().Infof("try to create task %s/%s corresponding runtime ingress '%v'", s.taskKind, s.taskName, ingressTmp)
+		err = s.client.Create(ctx, ingressTmp)
+		if nil != err {
+			return err
+		}
+	}
+	return nil
+}
+
 func (s *Scheduler) generateDaemonSet(agentSpec v1beta1.AgentSpec) *appsv1.DaemonSet {
 	daemonSet := types.DaemonsetTempl.DeepCopy()
 
@@ -330,3 +380,10 @@ func (s *Scheduler) generateService(agentSpec v1beta1.AgentSpec, ipFamily corev1
 
 	return service
 }
+
+func (s *Scheduler) generateIngress(serviceName string) *networkingv1.Ingress {
+	
ingress := types.IngressTempl.DeepCopy() + ingress.SetName(serviceName) + ingress.Spec.Rules[0].HTTP.Paths[0].Backend.Service.Name = serviceName + return ingress +} diff --git a/pkg/types/controller_config.go b/pkg/types/controller_config.go index a387927e..e890eebe 100644 --- a/pkg/types/controller_config.go +++ b/pkg/types/controller_config.go @@ -6,6 +6,7 @@ package types import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + networkingv1 "k8s.io/api/networking/v1" ) var ControllerEnvMapping = []EnvMapping{ @@ -69,6 +70,7 @@ type ControllerConfigStruct struct { ConfigMapDaemonsetPath string ConfigMapPodPath string ConfigMapServicePath string + ConfigMapIngressPath string // -------- from configmap Configmap ConfigmapConfig @@ -82,4 +84,5 @@ var ( DaemonsetTempl = new(appsv1.DaemonSet) PodTempl = new(corev1.Pod) ServiceTempl = new(corev1.Service) + IngressTempl = new(networkingv1.Ingress) ) diff --git a/test/e2e/apphttphealth/apphttphealth_suite_test.go b/test/e2e/apphttphealth/apphttphealth_suite_test.go index ff8bb74b..9ef77fc3 100644 --- a/test/e2e/apphttphealth/apphttphealth_suite_test.go +++ b/test/e2e/apphttphealth/apphttphealth_suite_test.go @@ -63,9 +63,9 @@ var _ = BeforeSuite(func() { e = frame.KClient.Get(context.Background(), key, caSecret) Expect(e).NotTo(HaveOccurred(), "get kdoctor ca secret") - ds, e := frame.GetDaemonSet(common.KDoctorAgentDSName, common.TestNameSpace) - Expect(e).NotTo(HaveOccurred(), "get kdoctor-agent daemonset") - reportNum = int(ds.Status.NumberReady) + nodeLIst, e := frame.GetNodeList() + Expect(e).NotTo(HaveOccurred(), "get node list") + reportNum = len(nodeLIst.Items) testAppName = "app-" + tools.RandomName() testAppNamespace = "ns-" + tools.RandomName() diff --git a/test/e2e/apphttphealth/apphttphealth_test.go b/test/e2e/apphttphealth/apphttphealth_test.go index 05e74e0f..5effa687 100644 --- a/test/e2e/apphttphealth/apphttphealth_test.go +++ b/test/e2e/apphttphealth/apphttphealth_test.go @@ -15,6 +15,7 @@ import ( ) var _ = Describe("testing appHttpHealth test ", Label("appHttpHealth"), func() { + var termMin = int64(3) It("success http testing appHttpHealth method GET", Label("A00001", "A00011", "C00006"), func() { var e error @@ -26,6 +27,11 @@ var _ = Describe("testing appHttpHealth test ", Label("appHttpHealth"), func() { appHttpHealth := new(v1beta1.AppHttpHealthy) appHttpHealth.Name = appHttpHealthName + // agent + agentSpec := new(v1beta1.AgentSpec) + agentSpec.TerminationGracePeriodMinutes = &termMin + appHttpHealth.Spec.AgentSpec = *agentSpec + // successCondition successCondition := new(v1beta1.NetSuccessCondition) successCondition.SuccessRate = &successRate @@ -63,8 +69,8 @@ var _ = Describe("testing appHttpHealth test ", Label("appHttpHealth"), func() { Expect(e).NotTo(HaveOccurred(), "wait appHttpHealth task finish") success, e := common.CompareResult(frame, appHttpHealthName, pluginManager.KindNameAppHttpHealthy, testPodIPs, reportNum) - Expect(success).To(BeTrue(), "compare report and task result") Expect(e).NotTo(HaveOccurred(), "compare report and task") + Expect(success).To(BeTrue(), "compare report and task result") }) It("failed http testing appHttpHealth due to status code", Label("A00002"), func() { @@ -78,6 +84,11 @@ var _ = Describe("testing appHttpHealth test ", Label("appHttpHealth"), func() { appHttpHealth := new(v1beta1.AppHttpHealthy) appHttpHealth.Name = appHttpHealthName + // agentSpec + agentSpec := new(v1beta1.AgentSpec) + agentSpec.TerminationGracePeriodMinutes = &termMin + 
appHttpHealth.Spec.AgentSpec = *agentSpec + // successCondition successCondition := new(v1beta1.NetSuccessCondition) successCondition.SuccessRate = &successRate @@ -116,8 +127,8 @@ var _ = Describe("testing appHttpHealth test ", Label("appHttpHealth"), func() { Expect(e).NotTo(HaveOccurred(), "wait appHttpHealth task finish") success, e := common.CompareResult(frame, appHttpHealthName, pluginManager.KindNameAppHttpHealthy, testPodIPs, reportNum) - Expect(success).To(BeFalse(), "compare report and task result") Expect(e).NotTo(HaveOccurred(), "compare report and task") + Expect(success).To(BeFalse(), "compare report and task result") }) @@ -131,6 +142,11 @@ var _ = Describe("testing appHttpHealth test ", Label("appHttpHealth"), func() { appHttpHealth := new(v1beta1.AppHttpHealthy) appHttpHealth.Name = appHttpHealthName + // agentSpec + agentSpec := new(v1beta1.AgentSpec) + agentSpec.TerminationGracePeriodMinutes = &termMin + appHttpHealth.Spec.AgentSpec = *agentSpec + // successCondition successCondition := new(v1beta1.NetSuccessCondition) successCondition.SuccessRate = &successRate @@ -168,8 +184,8 @@ var _ = Describe("testing appHttpHealth test ", Label("appHttpHealth"), func() { Expect(e).NotTo(HaveOccurred(), "wait appHttpHealth task finish") success, e := common.CompareResult(frame, appHttpHealthName, pluginManager.KindNameAppHttpHealthy, testPodIPs, reportNum) - Expect(success).To(BeFalse(), "compare report and task result") Expect(e).NotTo(HaveOccurred(), "compare report and task") + Expect(success).To(BeFalse(), "compare report and task result") }) @@ -182,6 +198,11 @@ var _ = Describe("testing appHttpHealth test ", Label("appHttpHealth"), func() { appHttpHealth := new(v1beta1.AppHttpHealthy) appHttpHealth.Name = appHttpHealthName + // agentSpec + agentSpec := new(v1beta1.AgentSpec) + agentSpec.TerminationGracePeriodMinutes = &termMin + appHttpHealth.Spec.AgentSpec = *agentSpec + // successCondition successCondition := new(v1beta1.NetSuccessCondition) successCondition.SuccessRate = &successRate @@ -221,8 +242,8 @@ var _ = Describe("testing appHttpHealth test ", Label("appHttpHealth"), func() { Expect(e).NotTo(HaveOccurred(), "wait appHttpHealth task finish") success, e := common.CompareResult(frame, appHttpHealthName, pluginManager.KindNameAppHttpHealthy, testPodIPs, reportNum) - Expect(success).To(BeTrue(), "compare report and task result") Expect(e).NotTo(HaveOccurred(), "compare report and task") + Expect(success).To(BeTrue(), "compare report and task result") }) @@ -236,6 +257,11 @@ var _ = Describe("testing appHttpHealth test ", Label("appHttpHealth"), func() { appHttpHealth := new(v1beta1.AppHttpHealthy) appHttpHealth.Name = appHttpHealthName + // agentSpec + agentSpec := new(v1beta1.AgentSpec) + agentSpec.TerminationGracePeriodMinutes = &termMin + appHttpHealth.Spec.AgentSpec = *agentSpec + // successCondition successCondition := new(v1beta1.NetSuccessCondition) successCondition.SuccessRate = &successRate @@ -276,8 +302,8 @@ var _ = Describe("testing appHttpHealth test ", Label("appHttpHealth"), func() { Expect(e).NotTo(HaveOccurred(), "wait appHttpHealth task finish") success, e := common.CompareResult(frame, appHttpHealthName, pluginManager.KindNameAppHttpHealthy, []string{}, reportNum) - Expect(success).To(BeFalse(), "compare report and task result") Expect(e).NotTo(HaveOccurred(), "compare report and task") + Expect(success).To(BeFalse(), "compare report and task result") }) @@ -291,6 +317,11 @@ var _ = Describe("testing appHttpHealth test ", Label("appHttpHealth"), 
func() { appHttpHealth := new(v1beta1.AppHttpHealthy) appHttpHealth.Name = appHttpHealthName + // agentSpec + agentSpec := new(v1beta1.AgentSpec) + agentSpec.TerminationGracePeriodMinutes = &termMin + appHttpHealth.Spec.AgentSpec = *agentSpec + // successCondition successCondition := new(v1beta1.NetSuccessCondition) successCondition.SuccessRate = &successRate @@ -328,8 +359,8 @@ var _ = Describe("testing appHttpHealth test ", Label("appHttpHealth"), func() { Expect(e).NotTo(HaveOccurred(), "wait appHttpHealth task finish") success, e := common.CompareResult(frame, appHttpHealthName, pluginManager.KindNameAppHttpHealthy, testPodIPs, reportNum) - Expect(success).To(BeTrue(), "compare report and task result") Expect(e).NotTo(HaveOccurred(), "compare report and task") + Expect(success).To(BeTrue(), "compare report and task result") }) @@ -343,6 +374,11 @@ var _ = Describe("testing appHttpHealth test ", Label("appHttpHealth"), func() { appHttpHealth := new(v1beta1.AppHttpHealthy) appHttpHealth.Name = appHttpHealthName + // agentSpec + agentSpec := new(v1beta1.AgentSpec) + agentSpec.TerminationGracePeriodMinutes = &termMin + appHttpHealth.Spec.AgentSpec = *agentSpec + // successCondition successCondition := new(v1beta1.NetSuccessCondition) successCondition.SuccessRate = &successRate @@ -383,8 +419,8 @@ var _ = Describe("testing appHttpHealth test ", Label("appHttpHealth"), func() { Expect(e).NotTo(HaveOccurred(), "wait appHttpHealth task finish") success, e := common.CompareResult(frame, appHttpHealthName, pluginManager.KindNameAppHttpHealthy, testPodIPs, reportNum) - Expect(success).To(BeTrue(), "compare report and task result") Expect(e).NotTo(HaveOccurred(), "compare report and task") + Expect(success).To(BeTrue(), "compare report and task result") }) It("Successfully http testing appHttpHealth method HEAD", Label("A00008"), func() { @@ -397,6 +433,11 @@ var _ = Describe("testing appHttpHealth test ", Label("appHttpHealth"), func() { appHttpHealth := new(v1beta1.AppHttpHealthy) appHttpHealth.Name = appHttpHealthName + // agentSpec + agentSpec := new(v1beta1.AgentSpec) + agentSpec.TerminationGracePeriodMinutes = &termMin + appHttpHealth.Spec.AgentSpec = *agentSpec + // successCondition successCondition := new(v1beta1.NetSuccessCondition) successCondition.SuccessRate = &successRate @@ -434,8 +475,8 @@ var _ = Describe("testing appHttpHealth test ", Label("appHttpHealth"), func() { Expect(e).NotTo(HaveOccurred(), "wait appHttpHealth task finish") success, e := common.CompareResult(frame, appHttpHealthName, pluginManager.KindNameAppHttpHealthy, testPodIPs, reportNum) - Expect(success).To(BeTrue(), "compare report and task result") Expect(e).NotTo(HaveOccurred(), "compare report and task") + Expect(success).To(BeTrue(), "compare report and task result") }) @@ -449,6 +490,11 @@ var _ = Describe("testing appHttpHealth test ", Label("appHttpHealth"), func() { appHttpHealth := new(v1beta1.AppHttpHealthy) appHttpHealth.Name = appHttpHealthName + // agentSpec + agentSpec := new(v1beta1.AgentSpec) + agentSpec.TerminationGracePeriodMinutes = &termMin + appHttpHealth.Spec.AgentSpec = *agentSpec + // successCondition successCondition := new(v1beta1.NetSuccessCondition) successCondition.SuccessRate = &successRate @@ -486,8 +532,8 @@ var _ = Describe("testing appHttpHealth test ", Label("appHttpHealth"), func() { Expect(e).NotTo(HaveOccurred(), "wait appHttpHealth task finish") success, e := common.CompareResult(frame, appHttpHealthName, pluginManager.KindNameAppHttpHealthy, testPodIPs, reportNum) - 
Expect(success).To(BeTrue(), "compare report and task result") Expect(e).NotTo(HaveOccurred(), "compare report and task") + Expect(success).To(BeTrue(), "compare report and task result") }) @@ -501,6 +547,11 @@ var _ = Describe("testing appHttpHealth test ", Label("appHttpHealth"), func() { appHttpHealth := new(v1beta1.AppHttpHealthy) appHttpHealth.Name = appHttpHealthName + // agentSpec + agentSpec := new(v1beta1.AgentSpec) + agentSpec.TerminationGracePeriodMinutes = &termMin + appHttpHealth.Spec.AgentSpec = *agentSpec + // successCondition successCondition := new(v1beta1.NetSuccessCondition) successCondition.SuccessRate = &successRate @@ -538,8 +589,8 @@ var _ = Describe("testing appHttpHealth test ", Label("appHttpHealth"), func() { Expect(e).NotTo(HaveOccurred(), "wait appHttpHealth task finish") success, e := common.CompareResult(frame, appHttpHealthName, pluginManager.KindNameAppHttpHealthy, testPodIPs, reportNum) - Expect(success).To(BeTrue(), "compare report and task result") Expect(e).NotTo(HaveOccurred(), "compare report and task") + Expect(success).To(BeTrue(), "compare report and task result") }) @@ -553,6 +604,11 @@ var _ = Describe("testing appHttpHealth test ", Label("appHttpHealth"), func() { appHttpHealth := new(v1beta1.AppHttpHealthy) appHttpHealth.Name = appHttpHealthName + // agentSpec + agentSpec := new(v1beta1.AgentSpec) + agentSpec.TerminationGracePeriodMinutes = &termMin + appHttpHealth.Spec.AgentSpec = *agentSpec + // successCondition successCondition := new(v1beta1.NetSuccessCondition) successCondition.SuccessRate = &successRate @@ -590,8 +646,8 @@ var _ = Describe("testing appHttpHealth test ", Label("appHttpHealth"), func() { Expect(e).NotTo(HaveOccurred(), "wait appHttpHealth task finish") success, e := common.CompareResult(frame, appHttpHealthName, pluginManager.KindNameAppHttpHealthy, testPodIPs, reportNum) - Expect(success).To(BeTrue(), "compare report and task result") Expect(e).NotTo(HaveOccurred(), "compare report and task") + Expect(success).To(BeTrue(), "compare report and task result") }) @@ -605,6 +661,11 @@ var _ = Describe("testing appHttpHealth test ", Label("appHttpHealth"), func() { appHttpHealth := new(v1beta1.AppHttpHealthy) appHttpHealth.Name = appHttpHealthName + // agentSpec + agentSpec := new(v1beta1.AgentSpec) + agentSpec.TerminationGracePeriodMinutes = &termMin + appHttpHealth.Spec.AgentSpec = *agentSpec + // successCondition successCondition := new(v1beta1.NetSuccessCondition) successCondition.SuccessRate = &successRate @@ -645,8 +706,8 @@ var _ = Describe("testing appHttpHealth test ", Label("appHttpHealth"), func() { Expect(e).NotTo(HaveOccurred(), "wait appHttpHealth task finish") success, e := common.CompareResult(frame, appHttpHealthName, pluginManager.KindNameAppHttpHealthy, testPodIPs, reportNum) - Expect(success).To(BeTrue(), "compare report and task result") Expect(e).NotTo(HaveOccurred(), "compare report and task") + Expect(success).To(BeTrue(), "compare report and task result") }) }) diff --git a/test/e2e/common/tools.go b/test/e2e/common/tools.go index fad4e3f0..8d50f4f1 100644 --- a/test/e2e/common/tools.go +++ b/test/e2e/common/tools.go @@ -186,12 +186,12 @@ func CompareResult(f *frame.Framework, name, taskKind string, podIPs []string, n // get Aggregate API report var r *kdoctor_report.KdoctorReport var err error - c := time.After(time.Second * 30) + c := time.After(time.Second * 60) r, err = GetPluginReportResult(f, name, n) for err != nil { select { case <-c: - return false, fmt.Errorf("get AppHttpHealth %s report time 
out,err: %v ", name, err) + return false, fmt.Errorf("get %s %s report time out,err: %v ", taskKind, name, err) default: time.Sleep(time.Second * 5) r, err = GetPluginReportResult(f, name, n) @@ -372,15 +372,19 @@ func CompareResult(f *frame.Framework, name, taskKind string, podIPs []string, n func GetResultFromReport(r *kdoctor_report.KdoctorReport) bool { for _, v := range *r.Spec.Report { if v.NetReachTask != nil { + ginkgo.GinkgoWriter.Println("reach") return v.NetReachTask.Succeed } if v.HttpAppHealthyTask != nil { + ginkgo.GinkgoWriter.Println("app") return v.HttpAppHealthyTask.Succeed } if v.NetDNSTask != nil { + ginkgo.GinkgoWriter.Println("dns") return v.NetDNSTask.Succeed } } + ginkgo.GinkgoWriter.Println("none") return true } diff --git a/test/e2e/netdns/netdns_suite_test.go b/test/e2e/netdns/netdns_suite_test.go index 18a708f3..b6bab055 100644 --- a/test/e2e/netdns/netdns_suite_test.go +++ b/test/e2e/netdns/netdns_suite_test.go @@ -40,9 +40,9 @@ var _ = BeforeSuite(func() { var e error frame, e = e2e.NewFramework(GinkgoT(), []func(*runtime.Scheme) error{kdoctor_v1beta1.AddToScheme}) Expect(e).NotTo(HaveOccurred()) - ds, e := frame.GetDaemonSet(common.KDoctorAgentDSName, common.TestNameSpace) - Expect(e).NotTo(HaveOccurred(), "get kdoctor-agent daemonset") - reportNum = int(ds.Status.NumberReady) + nodeLIst, e := frame.GetNodeList() + Expect(e).NotTo(HaveOccurred(), "get node list") + reportNum = len(nodeLIst.Items) KubeServiceList := &v1.ServiceList{} ops := []client.ListOption{ diff --git a/test/e2e/netdns/netdns_test.go b/test/e2e/netdns/netdns_test.go index 8e2108bf..403be8ec 100644 --- a/test/e2e/netdns/netdns_test.go +++ b/test/e2e/netdns/netdns_test.go @@ -16,7 +16,7 @@ import ( var _ = Describe("testing netDns ", Label("netDns"), func() { var targetDomain = "%s.kubernetes.default.svc.cluster.local" - + var termMin = int64(3) It("Successfully testing Cluster Dns Server case", Label("D00001", "C00005"), func() { var e error successRate := float64(1) @@ -27,6 +27,11 @@ var _ = Describe("testing netDns ", Label("netDns"), func() { netDns := new(v1beta1.Netdns) netDns.Name = netDnsName + // agentSpec + agentSpec := new(v1beta1.AgentSpec) + agentSpec.TerminationGracePeriodMinutes = &termMin + netDns.Spec.AgentSpec = *agentSpec + // successCondition successCondition := new(v1beta1.NetSuccessCondition) successCondition.SuccessRate = &successRate @@ -85,6 +90,11 @@ var _ = Describe("testing netDns ", Label("netDns"), func() { netDns := new(v1beta1.Netdns) netDns.Name = netDnsName + // agentSpec + agentSpec := new(v1beta1.AgentSpec) + agentSpec.TerminationGracePeriodMinutes = &termMin + netDns.Spec.AgentSpec = *agentSpec + // successCondition successCondition := new(v1beta1.NetSuccessCondition) successCondition.SuccessRate = &successRate @@ -127,8 +137,8 @@ var _ = Describe("testing netDns ", Label("netDns"), func() { Expect(e).NotTo(HaveOccurred(), "wait netDns task finish") success, e := common.CompareResult(frame, netDnsName, pluginManager.KindNameNetdns, testPodIPs, reportNum) - Expect(success).NotTo(BeFalse(), "compare report and task result") Expect(e).NotTo(HaveOccurred(), "compare report and task") + Expect(success).To(BeTrue(), "compare report and task result") }) }) diff --git a/test/e2e/netreach/netreach_suite_test.go b/test/e2e/netreach/netreach_suite_test.go index f3ed3e97..41e48d7c 100644 --- a/test/e2e/netreach/netreach_suite_test.go +++ b/test/e2e/netreach/netreach_suite_test.go @@ -5,13 +5,11 @@ package netreach_test import ( kdoctor_v1beta1 
"github.com/kdoctor-io/kdoctor/pkg/k8s/apis/kdoctor.io/v1beta1" - "github.com/kdoctor-io/kdoctor/test/e2e/common" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" e2e "github.com/spidernet-io/e2eframework/framework" "k8s.io/apimachinery/pkg/runtime" "testing" - // "k8s.io/apimachinery/pkg/runtime" ) func TestNetReach(t *testing.T) { @@ -26,9 +24,9 @@ var _ = BeforeSuite(func() { var e error frame, e = e2e.NewFramework(GinkgoT(), []func(*runtime.Scheme) error{kdoctor_v1beta1.AddToScheme}) Expect(e).NotTo(HaveOccurred()) - ds, e := frame.GetDaemonSet(common.KDoctorAgentDSName, common.TestNameSpace) - Expect(e).NotTo(HaveOccurred(), "get kdoctor-agent daemonset") - reportNum = int(ds.Status.NumberReady) + nodeLIst, e := frame.GetNodeList() + Expect(e).NotTo(HaveOccurred(), "get node list") + reportNum = len(nodeLIst.Items) // TODO (ii2day): add agent multus network }) diff --git a/test/e2e/netreach/netreach_test.go b/test/e2e/netreach/netreach_test.go index 626a0f9f..089a6f0b 100644 --- a/test/e2e/netreach/netreach_test.go +++ b/test/e2e/netreach/netreach_test.go @@ -13,7 +13,7 @@ import ( ) var _ = Describe("testing netReach ", Label("netReach"), func() { - + var termMin = int64(3) It("success testing netReach", Label("B00001", "C00004"), func() { var e error successRate := float64(1) @@ -24,6 +24,11 @@ var _ = Describe("testing netReach ", Label("netReach"), func() { netReach := new(v1beta1.NetReach) netReach.Name = netReachName + // agentSpec + agentSpec := new(v1beta1.AgentSpec) + agentSpec.TerminationGracePeriodMinutes = &termMin + netReach.Spec.AgentSpec = *agentSpec + // successCondition successCondition := new(v1beta1.NetSuccessCondition) successCondition.SuccessRate = &successRate @@ -67,8 +72,8 @@ var _ = Describe("testing netReach ", Label("netReach"), func() { Expect(e).NotTo(HaveOccurred(), "wait netReach task finish") success, e := common.CompareResult(frame, netReachName, pluginManager.KindNameNetReach, []string{}, reportNum) - Expect(success).NotTo(BeFalse(), "compare report and task result") Expect(e).NotTo(HaveOccurred(), "compare report and task") + Expect(success).To(BeTrue(), "compare report and task result") }) }) diff --git a/test/scripts/debugCluster.sh b/test/scripts/debugCluster.sh index 8eb89715..7b5eac20 100755 --- a/test/scripts/debugCluster.sh +++ b/test/scripts/debugCluster.sh @@ -21,13 +21,11 @@ echo "$CURRENT_FILENAME : E2E_KUBECONFIG $E2E_KUBECONFIG " COMPONENT_GOROUTINE_MAX=400 COMPONENT_PS_PROCESS_MAX=50 CONTROLLER_LABEL="app.kubernetes.io/component=kdoctor-controller" -AGENT_LABEL="app.kubernetes.io/component=kdoctor-agent" CONTROLLER_POD_LIST=$( kubectl get pods --no-headers --kubeconfig ${E2E_KUBECONFIG} --namespace ${COMPONENT_NAMESPACE} --selector ${CONTROLLER_LABEL} --output jsonpath={.items[*].metadata.name} ) -AGENT_POD_LIST=$( kubectl get pods --no-headers --kubeconfig ${E2E_KUBECONFIG} --namespace ${COMPONENT_NAMESPACE} --selector ${AGENT_LABEL} --output jsonpath={.items[*].metadata.name} ) [ -z "$CONTROLLER_POD_LIST" ] && echo "error, failed to find any kdoctor controller pod" && exit 1 -[ -z "$AGENT_POD_LIST" ] && echo "error! failed to find any kdoctor agent pod" && exit 1 + if [ -n "$E2E_LOG_FILE_NAME" ] ; then