feat: Add support for Istio multicluster (#1274)
Signed-off-by: Shakti <shaktiprakash.das@salesforce.com>
shakti-das authored Jul 8, 2021
1 parent 503c520 commit ba21c6c
Showing 9 changed files with 275 additions and 9 deletions.
1 change: 1 addition & 0 deletions USERS.md
@@ -19,3 +19,4 @@ Organizations below are **officially** using Argo Rollouts. Please send a PR wit
1. [PayPal](https://www.paypal.com/)
1. [Shipt](https://www.shipt.com/)
1. [Nitro](https://www.gonitro.com)
1. [Salesforce](https://www.salesforce.com/)
18 changes: 12 additions & 6 deletions cmd/rollouts-controller/main.go
@@ -29,7 +29,6 @@ import (
"github.com/argoproj/argo-rollouts/rollout/trafficrouting/smi"
controllerutil "github.com/argoproj/argo-rollouts/utils/controller"
"github.com/argoproj/argo-rollouts/utils/defaults"
"github.com/argoproj/argo-rollouts/utils/istio"
istioutil "github.com/argoproj/argo-rollouts/utils/istio"
logutil "github.com/argoproj/argo-rollouts/utils/log"
"github.com/argoproj/argo-rollouts/utils/tolerantinformer"
@@ -84,7 +83,7 @@ func newCommand() *cobra.Command {
stopCh := signals.SetupSignalHandler()

alb.SetDefaultVerifyWeight(albVerifyWeight)
istio.SetIstioAPIVersion(istioVersion)
istioutil.SetIstioAPIVersion(istioVersion)
ambassador.SetAPIVersion(ambassadorVersion)
smi.SetSMIAPIVersion(trafficSplitVersion)

@@ -97,8 +96,6 @@ func newCommand() *cobra.Command {
namespace = configNS
log.Infof("Using namespace %s", namespace)
}
k8sRequestProvider := &metrics.K8sRequestsCountProvider{}
kubeclientmetrics.AddMetricsTransportWrapper(config, k8sRequestProvider.IncKubernetesRequest)

kubeClient, err := kubernetes.NewForConfig(config)
checkError(err)
@@ -134,14 +131,22 @@ func newCommand() *cobra.Command {
// a single namespace (i.e. rollouts-controller --namespace foo).
clusterDynamicInformerFactory := dynamicinformer.NewFilteredDynamicSharedInformerFactory(dynamicClient, resyncDuration, metav1.NamespaceAll, instanceIDTweakListFunc)
// 3. We finally need an istio dynamic informer factory which does not use a tweakListFunc.
istioDynamicInformerFactory := dynamicinformer.NewFilteredDynamicSharedInformerFactory(dynamicClient, resyncDuration, namespace, nil)
_, istioPrimaryDynamicClient := istioutil.GetPrimaryClusterDynamicClient(kubeClient, namespace)
if istioPrimaryDynamicClient == nil {
istioPrimaryDynamicClient = dynamicClient
}
istioDynamicInformerFactory := dynamicinformer.NewFilteredDynamicSharedInformerFactory(istioPrimaryDynamicClient, resyncDuration, namespace, nil)

controllerNamespaceInformerFactory := kubeinformers.NewSharedInformerFactoryWithOptions(
kubeClient,
resyncDuration,
kubeinformers.WithNamespace(defaults.Namespace()))
configMapInformer := controllerNamespaceInformerFactory.Core().V1().ConfigMaps()
secretInformer := controllerNamespaceInformerFactory.Core().V1().Secrets()

k8sRequestProvider := &metrics.K8sRequestsCountProvider{}
kubeclientmetrics.AddMetricsTransportWrapper(config, k8sRequestProvider.IncKubernetesRequest)

cm := controller.NewManager(
namespace,
kubeClient,
@@ -158,6 +163,7 @@ func newCommand() *cobra.Command {
tolerantinformer.NewTolerantAnalysisRunInformer(dynamicInformerFactory),
tolerantinformer.NewTolerantAnalysisTemplateInformer(dynamicInformerFactory),
tolerantinformer.NewTolerantClusterAnalysisTemplateInformer(clusterDynamicInformerFactory),
istioPrimaryDynamicClient,
istioDynamicInformerFactory.ForResource(istioutil.GetIstioVirtualServiceGVR()).Informer(),
istioDynamicInformerFactory.ForResource(istioutil.GetIstioDestinationRuleGVR()).Informer(),
configMapInformer,
@@ -179,7 +185,7 @@ func newCommand() *cobra.Command {
jobInformerFactory.Start(stopCh)

// Check if Istio installed on cluster before starting dynamicInformerFactory
if istioutil.DoesIstioExist(dynamicClient, namespace) {
if istioutil.DoesIstioExist(istioPrimaryDynamicClient, namespace) {
istioDynamicInformerFactory.Start(stopCh)
}

2 changes: 2 additions & 0 deletions controller/controller.go
@@ -114,6 +114,7 @@ func NewManager(
analysisRunInformer informers.AnalysisRunInformer,
analysisTemplateInformer informers.AnalysisTemplateInformer,
clusterAnalysisTemplateInformer informers.ClusterAnalysisTemplateInformer,
istioPrimaryDynamicClient dynamic.Interface,
istioVirtualServiceInformer cache.SharedIndexInformer,
istioDestinationRuleInformer cache.SharedIndexInformer,
configMapInformer coreinformers.ConfigMapInformer,
@@ -175,6 +176,7 @@ func NewManager(
AnalysisRunInformer: analysisRunInformer,
AnalysisTemplateInformer: analysisTemplateInformer,
ClusterAnalysisTemplateInformer: clusterAnalysisTemplateInformer,
IstioPrimaryDynamicClient: istioPrimaryDynamicClient,
IstioVirtualServiceInformer: istioVirtualServiceInformer,
IstioDestinationRuleInformer: istioDestinationRuleInformer,
ReplicaSetInformer: replicaSetInformer,
66 changes: 66 additions & 0 deletions docs/features/traffic-management/istio.md
@@ -246,6 +246,72 @@ During the lifecycle of a Rollout using Istio DestinationRule, Argo Rollouts will
label of the canary and stable ReplicaSets


## Multicluster Setup
If you have an [Istio multicluster setup](https://istio.io/latest/docs/setup/install/multicluster/)
in which the Istio primary cluster is different from the cluster where the Argo Rollouts controller
is running, perform the following setup:

1. Create a `ServiceAccount` in the Istio primary cluster.
```yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: argo-rollouts-istio-primary
  namespace: <any-namespace-preferably-config-namespace>
```
2. Create a `ClusterRole` in the Istio primary cluster that grants the Rollouts controller access to Istio networking resources.
```yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: argo-rollouts-istio-primary
rules:
- apiGroups:
  - networking.istio.io
  resources:
  - virtualservices
  - destinationrules
  verbs:
  - get
  - list
  - watch
  - update
  - patch
```
Note: If the Argo Rollouts controller is also installed in the Istio primary cluster, you can reuse the
existing `argo-rollouts-clusterrole` ClusterRole instead of creating a new one.
3. Bind the `ClusterRole` to the `ServiceAccount` in the Istio primary cluster.
```yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: argo-rollouts-istio-primary
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: argo-rollouts-istio-primary
subjects:
- kind: ServiceAccount
  name: argo-rollouts-istio-primary
  namespace: <namespace-of-the-service-account>
```
4. Use the following command to generate a secret that lets the Rollouts controller access the Istio primary cluster.
The secret is generated from the Istio primary cluster but applied to the cluster where the Argo Rollouts controller
is running (i.e., the Istio remote cluster). It can be generated any time after Step 1, since it only requires the
`ServiceAccount` to exist.
[Reference to the command](https://istio.io/latest/docs/reference/commands/istioctl/#istioctl-experimental-create-remote-secret).
```shell
istioctl x create-remote-secret --type remote --name <cluster-name> \
  --namespace <namespace-of-the-service-account> \
  --service-account <service-account-created-in-step1> \
  --context="<ISTIO_PRIMARY_CLUSTER>" | \
  kubectl apply -f - --context="<ARGO_ROLLOUT_CLUSTER/ISTIO_REMOTE_CLUSTER>"
```
5. Label the secret so that the Rollouts controller can discover it as the Istio primary-cluster credential (an illustrative example of the resulting secret is shown after these steps).
```shell
kubectl label secret <istio-remote-secret> istio.argoproj.io/primary-cluster="true" -n <namespace-of-the-secret>
```
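The controller discovers the primary cluster by looking for a secret carrying the `istio.argoproj.io/primary-cluster: "true"` label in its namespace; the secret's data key is used as the primary cluster ID, and its value must be a kubeconfig for that cluster. The manifest below is only an illustrative sketch — the actual name, annotations, and data produced by `istioctl create-remote-secret` depend on your istioctl version and cluster name:
```yaml
apiVersion: v1
kind: Secret
metadata:
  # The name shown here is illustrative; istioctl chooses the actual name.
  name: istio-remote-secret-<cluster-name>
  namespace: <namespace-of-the-secret>
  labels:
    # Added in Step 5; the Rollouts controller selects secrets on this label.
    istio.argoproj.io/primary-cluster: "true"
type: Opaque
data:
  # Key = primary cluster name (used as the cluster ID), value = base64-encoded kubeconfig.
  <cluster-name>: <base64-encoded-kubeconfig>
```
If such a secret is present at startup, the controller builds its Istio dynamic client against the primary cluster and logs `Istio primary/config cluster is <cluster-name>`; otherwise it falls back to the local cluster's client.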

## Comparison Between Approaches

There are some advantages and disadvantages of host-level traffic splitting vs. subset-level traffic
3 changes: 2 additions & 1 deletion rollout/controller.go
@@ -96,6 +96,7 @@ type ControllerConfig struct {
ServicesInformer coreinformers.ServiceInformer
IngressInformer extensionsinformers.IngressInformer
RolloutsInformer informers.RolloutInformer
IstioPrimaryDynamicClient dynamic.Interface
IstioVirtualServiceInformer cache.SharedIndexInformer
IstioDestinationRuleInformer cache.SharedIndexInformer
ResyncPeriod time.Duration
@@ -203,7 +204,7 @@ func NewController(cfg ControllerConfig) *Controller {

controller.IstioController = istio.NewIstioController(istio.IstioControllerConfig{
ArgoprojClientSet: cfg.ArgoProjClientset,
DynamicClientSet: cfg.DynamicClientSet,
DynamicClientSet: cfg.IstioPrimaryDynamicClient,
EnqueueRollout: controller.enqueueRollout,
RolloutsInformer: cfg.RolloutsInformer,
VirtualServiceInformer: cfg.IstioVirtualServiceInformer,
1 change: 1 addition & 0 deletions rollout/controller_test.go
@@ -511,6 +511,7 @@ func (f *fixture) newController(resync resyncFunc) (*Controller, informers.Share
ServicesInformer: k8sI.Core().V1().Services(),
IngressInformer: k8sI.Extensions().V1beta1().Ingresses(),
RolloutsInformer: i.Argoproj().V1alpha1().Rollouts(),
IstioPrimaryDynamicClient: dynamicClient,
IstioVirtualServiceInformer: istioVirtualServiceInformer,
IstioDestinationRuleInformer: istioDestinationRuleInformer,
ResyncPeriod: resync(),
4 changes: 2 additions & 2 deletions rollout/trafficrouting.go
@@ -34,9 +34,9 @@ func (c *Controller) NewTrafficRoutingReconciler(roCtx *rolloutContext) (Traffic
}
if rollout.Spec.Strategy.Canary.TrafficRouting.Istio != nil {
if c.IstioController.VirtualServiceInformer.HasSynced() {
return istio.NewReconciler(rollout, c.dynamicclientset, c.recorder, c.IstioController.VirtualServiceLister, c.IstioController.DestinationRuleLister), nil
return istio.NewReconciler(rollout, c.IstioController.DynamicClientSet, c.recorder, c.IstioController.VirtualServiceLister, c.IstioController.DestinationRuleLister), nil
} else {
return istio.NewReconciler(rollout, c.dynamicclientset, c.recorder, nil, nil), nil
return istio.NewReconciler(rollout, c.IstioController.DynamicClientSet, c.recorder, nil, nil), nil
}
}
if rollout.Spec.Strategy.Canary.TrafficRouting.Nginx != nil {
95 changes: 95 additions & 0 deletions utils/istio/multicluster.go
@@ -0,0 +1,95 @@
package istio

import (
    "context"
    "errors"
    "fmt"

    log "github.com/sirupsen/logrus"
    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/apimachinery/pkg/selection"
    "k8s.io/client-go/dynamic"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/tools/clientcmd"
)

const (
    // PrimaryClusterSecretLabel identifies the secret holding the kubeconfig of the Istio primary cluster.
    PrimaryClusterSecretLabel = "istio.argoproj.io/primary-cluster"
)

// GetPrimaryClusterDynamicClient discovers the Istio primary cluster via a secret labeled
// istio.argoproj.io/primary-cluster=true in the given namespace, and returns the primary
// cluster's ID along with a dynamic client built from the embedded kubeconfig. It returns
// a nil client when no such secret exists or the kubeconfig cannot be used.
func GetPrimaryClusterDynamicClient(kubeClient kubernetes.Interface, namespace string) (string, dynamic.Interface) {
    primaryClusterSecret := getPrimaryClusterSecret(kubeClient, namespace)
    if primaryClusterSecret != nil {
        clusterId, clientConfig, err := getKubeClientConfig(primaryClusterSecret)
        if err != nil {
            return clusterId, nil
        }

        config, err := clientConfig.ClientConfig()
        if err != nil {
            log.Errorf("Error fetching primary ClientConfig: %v", err)
            return clusterId, nil
        }

        dynamicClient, err := dynamic.NewForConfig(config)
        if err != nil {
            log.Errorf("Error building dynamic client from config: %v", err)
            return clusterId, nil
        }

        return clusterId, dynamicClient
    }

    return "", nil
}

// getPrimaryClusterSecret returns the first secret in the given namespace carrying the
// istio.argoproj.io/primary-cluster=true label, or nil if none is found.
func getPrimaryClusterSecret(kubeClient kubernetes.Interface, namespace string) *corev1.Secret {
    req, err := labels.NewRequirement(PrimaryClusterSecretLabel, selection.Equals, []string{"true"})
    if err != nil {
        return nil
    }

    secrets, err := kubeClient.CoreV1().Secrets(namespace).List(context.TODO(), metav1.ListOptions{Limit: 1, LabelSelector: req.String()})
    if err != nil {
        return nil
    }

    if secrets != nil && len(secrets.Items) > 0 {
        return &secrets.Items[0]
    }

    return nil
}

// getKubeClientConfig extracts the primary cluster ID and client config from the first
// kubeconfig entry stored in the secret's data.
func getKubeClientConfig(secret *corev1.Secret) (string, clientcmd.ClientConfig, error) {
    for clusterId, kubeConfig := range secret.Data {
        primaryClusterConfig, err := buildKubeClientConfig(kubeConfig)
        if err != nil {
            log.Errorf("Error building kubeconfig for primary cluster %s: %v", clusterId, err)
            return clusterId, nil, fmt.Errorf("error building primary cluster client %s: %v", clusterId, err)
        }
        log.Infof("Istio primary/config cluster is %s", clusterId)
        return clusterId, primaryClusterConfig, err
    }
    return "", nil, nil
}

// buildKubeClientConfig loads, validates, and wraps a raw kubeconfig into a clientcmd.ClientConfig.
func buildKubeClientConfig(kubeConfig []byte) (clientcmd.ClientConfig, error) {
    if len(kubeConfig) == 0 {
        return nil, errors.New("kubeconfig is empty")
    }

    rawConfig, err := clientcmd.Load(kubeConfig)
    if err != nil {
        return nil, fmt.Errorf("kubeconfig cannot be loaded: %v", err)
    }

    if err := clientcmd.Validate(*rawConfig); err != nil {
        return nil, fmt.Errorf("kubeconfig is not valid: %v", err)
    }

    clientConfig := clientcmd.NewDefaultClientConfig(*rawConfig, &clientcmd.ConfigOverrides{})
    return clientConfig, nil
}
94 changes: 94 additions & 0 deletions utils/istio/multicluster_test.go
@@ -0,0 +1,94 @@
package istio

import (
    "testing"

    "github.com/stretchr/testify/assert"
    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/client-go/kubernetes/fake"
)

var (
    secret0                    = makeSecret("secret0", "namespace0", "primary0", []byte("kubeconfig0-0"))
    secret1                    = makeSecret("secret1", "namespace1", "primary1", []byte("kubeconfig1-1"))
    rolloutControllerNamespace = "argo-rollout-ns"
)

func TestGetPrimaryClusterDynamicClient(t *testing.T) {
    testCases := []struct {
        name              string
        namespace         string
        existingSecrets   []*v1.Secret
        expectedClusterId string
    }{
        {
            "TestNoPrimaryClusterSecret",
            metav1.NamespaceAll,
            nil,
            "",
        },
        {
            "TestPrimaryClusterSingleSecret",
            metav1.NamespaceAll,
            []*v1.Secret{
                secret0,
            },
            "primary0",
        },
        {
            "TestPrimaryClusterMultipleSecrets",
            metav1.NamespaceAll,
            []*v1.Secret{
                secret0,
                secret1,
            },
            "primary0",
        },
        {
            "TestPrimaryClusterNoSecretInNamespaceForNamespacedController",
            rolloutControllerNamespace,
            []*v1.Secret{
                secret0,
            },
            "",
        },
        {
            "TestPrimaryClusterSingleSecretInNamespaceForNamespacedController",
            rolloutControllerNamespace,
            []*v1.Secret{
                makeSecret("secret0", rolloutControllerNamespace, "primary0", []byte("kubeconfig0-0")),
            },
            "primary0",
        },
    }

    for _, tc := range testCases {
        t.Run(tc.name, func(t *testing.T) {
            var existingObjs []runtime.Object
            for _, s := range tc.existingSecrets {
                existingObjs = append(existingObjs, s)
            }

            client := fake.NewSimpleClientset(existingObjs...)
            clusterId, _ := GetPrimaryClusterDynamicClient(client, tc.namespace)
            assert.Equal(t, tc.expectedClusterId, clusterId)
        })
    }
}

func makeSecret(secret, namespace, clusterID string, kubeconfig []byte) *v1.Secret {
    return &v1.Secret{
        ObjectMeta: metav1.ObjectMeta{
            Name:      secret,
            Namespace: namespace,
            Labels: map[string]string{
                PrimaryClusterSecretLabel: "true",
            },
        },
        Data: map[string][]byte{
            clusterID: kubeconfig,
        },
    }
}
