
Commit

Merge branch 'master' into update_set_tikv_gc_life_time
shonge authored Apr 25, 2021
2 parents 2940d94 + 5f1c135 commit 447afdb
Showing 5 changed files with 67 additions and 84 deletions.
49 changes: 39 additions & 10 deletions pkg/monitor/monitor/monitor_manager.go
@@ -115,25 +115,34 @@ func (m *MonitorManager) SyncMonitor(monitor *v1alpha1.TidbMonitor) error {
}
}
}
// create or update tls asset secret
err := m.syncAssetSecret(monitor, assetStore)
if err != nil {
return err
}
var firstDc *v1alpha1.DMCluster
if monitor.Spec.DM != nil {
for _, dcRef := range monitor.Spec.DM.Clusters {
dc, err := m.deps.DMClusterLister.DMClusters(dcRef.Namespace).Get(dcRef.Name)
if err != nil {
rerr := fmt.Errorf("get tm[%s/%s]'s target dc[%s/%s] failed, err: %v", monitor.Namespace, monitor.Name, dcRef.Namespace, dcRef.Name, err)
return rerr
}
if firstDc == nil {
firstDc = dc
break
}
// If the cluster enables TLS
if dc.IsTLSClusterEnabled() {
dmTlsSecretName := util.DMClientTLSSecretName(dcRef.Name)
err := assetStore.addTLSAssets(dcRef.Namespace, dmTlsSecretName)
if err != nil {
return err
}
}
}
}

// create or update tls asset secret
err := m.syncAssetSecret(monitor, assetStore)
if err != nil {
return err
}

// Sync Service
if err := m.syncTidbMonitorService(monitor); err != nil {
message := fmt.Sprintf("Sync TidbMonitor[%s/%s] Service failed, err: %v", monitor.Namespace, monitor.Name, err)
@@ -187,7 +196,7 @@ func (m *MonitorManager) syncTidbMonitorService(monitor *v1alpha1.TidbMonitor) e
func (m *MonitorManager) syncTidbMonitorStatefulset(tc *v1alpha1.TidbCluster, dc *v1alpha1.DMCluster, monitor *v1alpha1.TidbMonitor) error {
ns := monitor.Namespace
name := monitor.Name
cm, err := m.syncTidbMonitorConfig(dc, monitor)
cm, err := m.syncTidbMonitorConfig(monitor)
if err != nil {
klog.Errorf("tm[%s/%s]'s configmap failed to sync,err: %v", ns, name, err)
return err
@@ -246,7 +255,7 @@ func (m *MonitorManager) syncTidbMonitorSecret(monitor *v1alpha1.TidbMonitor) (*
return m.deps.TypedControl.CreateOrUpdateSecret(monitor, newSt)
}

func (m *MonitorManager) syncTidbMonitorConfig(dc *v1alpha1.DMCluster, monitor *v1alpha1.TidbMonitor) (*corev1.ConfigMap, error) {
func (m *MonitorManager) syncTidbMonitorConfig(monitor *v1alpha1.TidbMonitor) (*corev1.ConfigMap, error) {
if features.DefaultFeatureGate.Enabled(features.AutoScaling) {
// TODO: We need to update the status to tell users we are monitoring extra clusters
// Get all autoscaling clusters for TC, and add them to .Spec.Clusters to
@@ -308,7 +317,27 @@ func (m *MonitorManager) syncTidbMonitorConfig(dc *v1alpha1.DMCluster, monitor *
monitorClusterInfos = append(monitorClusterInfos, clusterRegex)
}

newCM, err := getMonitorConfigMap(dc, monitor, monitorClusterInfos)
var dmClusterInfos []ClusterRegexInfo
if monitor.Spec.DM != nil {
for _, dmRef := range monitor.Spec.DM.Clusters {
dm, err := m.deps.DMClusterLister.DMClusters(dmRef.Namespace).Get(dmRef.Name)
if err != nil {
rerr := fmt.Errorf("get tm[%s/%s]'s target dm[%s/%s] failed, err: %v", monitor.Namespace, monitor.Name, dmRef.Namespace, dmRef.Name, err)
return nil, rerr
}
clusterRegex := ClusterRegexInfo{
Name: dmRef.Name,
Namespace: dmRef.Namespace,
}
// If the cluster enables TLS
if dm.IsTLSClusterEnabled() {
clusterRegex.enableTLS = true
}
dmClusterInfos = append(dmClusterInfos, clusterRegex)
}
}

newCM, err := getMonitorConfigMap(monitor, monitorClusterInfos, dmClusterInfos)
if err != nil {
return nil, err
}
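
For context, the new loop above builds one ClusterRegexInfo per DM cluster. The struct definition itself is not part of this diff; the following is a minimal sketch inferred from how the fields are populated here, and the real type in template.go may carry additional fields.

package monitor

// ClusterRegexInfo as implied by the usage above: one entry per monitored
// cluster, with TLS decided per cluster rather than by a global flag.
type ClusterRegexInfo struct {
	Name      string
	Namespace string
	enableTLS bool // set when the target cluster has TLS enabled
}
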
10 changes: 5 additions & 5 deletions pkg/monitor/monitor/template.go
@@ -141,7 +141,6 @@ type MonitorConfigModel struct {
AlertmanagerURL string
ClusterInfos []ClusterRegexInfo
DMClusterInfos []ClusterRegexInfo
EnableTLSDMCluster bool
ExternalLabels model.LabelSet
RemoteWriteConfigs []*config.RemoteWriteConfig
}
@@ -423,12 +422,13 @@ func scrapeJob(jobName string, componentPattern config.Regexp, cmodel *MonitorCo
}
}

if cmodel.EnableTLSDMCluster && isDMJob(jobName) {
if cluster.enableTLS && isDMJob(jobName) {
scrapeconfig.Scheme = "https"
dmTlsSecretName := util.DMClientTLSSecretName(cluster.Name)
scrapeconfig.HTTPClientConfig.TLSConfig = config.TLSConfig{
CAFile: path.Join(util.DMClusterClientTLSPath, corev1.ServiceAccountRootCAKey),
CertFile: path.Join(util.DMClusterClientTLSPath, corev1.TLSCertKey),
KeyFile: path.Join(util.DMClusterClientTLSPath, corev1.TLSPrivateKeyKey),
CAFile: path.Join(util.DMClusterClientTLSPath, TLSAssetKey{"secret", cluster.Namespace, dmTlsSecretName, corev1.ServiceAccountRootCAKey}.String()),
CertFile: path.Join(util.DMClusterClientTLSPath, TLSAssetKey{"secret", cluster.Namespace, dmTlsSecretName, corev1.TLSCertKey}.String()),
KeyFile: path.Join(util.DMClusterClientTLSPath, TLSAssetKey{"secret", cluster.Namespace, dmTlsSecretName, corev1.TLSPrivateKeyKey}.String()),
}
}
scrapeJobs = append(scrapeJobs, scrapeconfig)
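
The tls_config paths above are now derived from TLSAssetKey values instead of fixed file names. The type is defined outside this diff; based on the positional literal TLSAssetKey{"secret", namespace, name, key} and the rendered paths asserted in the test changes below (for example secret_ns1_target-dm-client-secret_ca.crt), it plausibly formats as kind_namespace_name_key. A hedged sketch, not the actual implementation:

package monitor

import "fmt"

// TLSAssetKey sketch inferred from its usage in scrapeJob and from the paths
// expected in template_test.go; the field names are assumptions.
type TLSAssetKey struct {
	kind      string // e.g. "secret"
	namespace string
	name      string
	key       string // e.g. "ca.crt", "tls.crt", "tls.key"
}

// String joins the parts into the flat asset file name referenced by the
// generated Prometheus config, e.g. "secret_ns1_target-dm-client-secret_ca.crt".
func (k TLSAssetKey) String() string {
	return fmt.Sprintf("%s_%s_%s_%s", k.kind, k.namespace, k.name, k.key)
}
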
37 changes: 16 additions & 21 deletions pkg/monitor/monitor/template_test.go
@@ -335,8 +334,7 @@ remote_write:
DMClusterInfos: []ClusterRegexInfo{
{Name: "target", Namespace: "ns1"},
},
EnableTLSDMCluster: false,
AlertmanagerURL: "alert-url",
AlertmanagerURL: "alert-url",
RemoteWriteConfigs: []*config.RemoteWriteConfig{
{
URL: &config.URL{URL: url},
@@ -909,9 +908,9 @@ scrape_configs:
names:
- ns1
tls_config:
ca_file: /var/lib/dm-cluster-client-tls/ca.crt
cert_file: /var/lib/dm-cluster-client-tls/tls.crt
key_file: /var/lib/dm-cluster-client-tls/tls.key
ca_file: /var/lib/dm-cluster-client-tls/secret_ns1_target-dm-client-secret_ca.crt
cert_file: /var/lib/dm-cluster-client-tls/secret_ns1_target-dm-client-secret_tls.crt
key_file: /var/lib/dm-cluster-client-tls/secret_ns1_target-dm-client-secret_tls.key
insecure_skip_verify: false
relabel_configs:
- source_labels: [__meta_kubernetes_pod_label_app_kubernetes_io_instance]
@@ -962,9 +961,9 @@ scrape_configs:
names:
- ns1
tls_config:
ca_file: /var/lib/dm-cluster-client-tls/ca.crt
cert_file: /var/lib/dm-cluster-client-tls/tls.crt
key_file: /var/lib/dm-cluster-client-tls/tls.key
ca_file: /var/lib/dm-cluster-client-tls/secret_ns1_target-dm-client-secret_ca.crt
cert_file: /var/lib/dm-cluster-client-tls/secret_ns1_target-dm-client-secret_tls.crt
key_file: /var/lib/dm-cluster-client-tls/secret_ns1_target-dm-client-secret_tls.key
insecure_skip_verify: false
relabel_configs:
- source_labels: [__meta_kubernetes_pod_label_app_kubernetes_io_instance]
@@ -1010,9 +1009,8 @@ scrape_configs:
{Name: "target", Namespace: "ns1", enableTLS: true},
},
DMClusterInfos: []ClusterRegexInfo{
{Name: "target", Namespace: "ns1"},
{Name: "target", Namespace: "ns1", enableTLS: true},
},
EnableTLSDMCluster: true,
}
content, err := RenderPrometheusConfig(model)
g.Expect(err).NotTo(HaveOccurred())
@@ -1048,8 +1046,7 @@ func TestMultipleClusterConfigRender(t *testing.T) {
{Name: "ns1", Namespace: "ns1"},
{Name: "ns2", Namespace: "ns2"},
},
EnableTLSDMCluster: false,
AlertmanagerURL: "alert-url",
AlertmanagerURL: "alert-url",
}
// first validate that the config generates normally
_, err := RenderPrometheusConfig(model)
@@ -1067,11 +1064,10 @@ func TestMultipleClusterTlsConfigRender(t *testing.T) {
{Name: "ns2", Namespace: "ns2", enableTLS: true},
},
DMClusterInfos: []ClusterRegexInfo{
{Name: "ns1", Namespace: "ns1"},
{Name: "ns2", Namespace: "ns2"},
{Name: "ns1", Namespace: "ns1", enableTLS: true},
{Name: "ns2", Namespace: "ns2", enableTLS: true},
},
EnableTLSDMCluster: true,
AlertmanagerURL: "alert-url",
AlertmanagerURL: "alert-url",
}
// first validate that the config generates normally
_, err := RenderPrometheusConfig(model)
@@ -1113,11 +1109,10 @@ func TestScrapeJob(t *testing.T) {
}

model := &MonitorConfigModel{
AlertmanagerURL: "",
ClusterInfos: ClusterInfos,
DMClusterInfos: nil,
ExternalLabels: buildExternalLabels(tm),
EnableTLSDMCluster: false,
AlertmanagerURL: "",
ClusterInfos: ClusterInfos,
DMClusterInfos: nil,
ExternalLabels: buildExternalLabels(tm),
}
scrapeJobs := scrapeJob("pd", pdPattern, model, buildAddressRelabelConfigByComponent("pd"))
tcTlsSecretName := util.ClusterClientTLSSecretName(name)
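
With the EnableTLSDMCluster flag removed, TLS is expressed per entry, which also allows mixed setups. A small usage sketch mirroring the fixtures above, written as it might appear inside a test like the ones in this file; the cluster names here are illustrative only:

model := &MonitorConfigModel{
	AlertmanagerURL: "alert-url",
	ClusterInfos: []ClusterRegexInfo{
		{Name: "tc-tls", Namespace: "ns1", enableTLS: true},
	},
	// Each DM cluster decides TLS for itself, so a plaintext and a
	// TLS-enabled DM cluster can be scraped by the same TidbMonitor.
	DMClusterInfos: []ClusterRegexInfo{
		{Name: "dm-tls", Namespace: "ns1", enableTLS: true},
		{Name: "dm-plain", Namespace: "ns2"},
	},
	ExternalLabels: buildExternalLabels(tm),
}
content, err := RenderPrometheusConfig(model)
if err != nil {
	t.Fatalf("render failed: %v", err)
}
_ = content
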
42 changes: 5 additions & 37 deletions pkg/monitor/monitor/util.go
@@ -118,24 +118,12 @@ func getAlertManagerRulesVersion(tc *v1alpha1.TidbCluster, monitor *v1alpha1.Tid

// getMonitorConfigMap generate the Prometheus config and Grafana config for TidbMonitor,
// If the namespace in ClusterRef is empty, we would set the TidbMonitor's namespace in the default
func getMonitorConfigMap(dc *v1alpha1.DMCluster, monitor *v1alpha1.TidbMonitor, monitorClusterInfos []ClusterRegexInfo) (*core.ConfigMap, error) {

var releaseDMClusterInfos []ClusterRegexInfo
if monitor.Spec.DM != nil {
for _, dmcluster := range monitor.Spec.DM.Clusters {
releaseDMClusterInfos = append(releaseDMClusterInfos, ClusterRegexInfo{
Name: dmcluster.Name,
Namespace: dmcluster.Namespace,
})
}
}

func getMonitorConfigMap(monitor *v1alpha1.TidbMonitor, monitorClusterInfos []ClusterRegexInfo, dmClusterInfos []ClusterRegexInfo) (*core.ConfigMap, error) {
model := &MonitorConfigModel{
AlertmanagerURL: "",
ClusterInfos: monitorClusterInfos,
DMClusterInfos: releaseDMClusterInfos,
ExternalLabels: buildExternalLabels(monitor),
EnableTLSDMCluster: dc != nil && dc.IsTLSClusterEnabled(),
AlertmanagerURL: "",
ClusterInfos: monitorClusterInfos,
DMClusterInfos: dmClusterInfos,
ExternalLabels: buildExternalLabels(monitor),
}

if len(monitor.Spec.Prometheus.RemoteWrite) > 0 {
@@ -492,13 +480,6 @@ func getMonitorPrometheusContainer(monitor *v1alpha1.TidbMonitor, tc *v1alpha1.T
commands = append(commands, "--storage.tsdb.min-block-duration=2h")
}
c.Command = append(c.Command, strings.Join(commands, " "))
if dc != nil && dc.IsTLSClusterEnabled() {
c.VolumeMounts = append(c.VolumeMounts, core.VolumeMount{
Name: util.DMClusterClientVolName,
MountPath: util.DMClusterClientTLSPath,
ReadOnly: true,
})
}
if monitor.Spec.Prometheus.ImagePullPolicy != nil {
c.ImagePullPolicy = *monitor.Spec.Prometheus.ImagePullPolicy
}
@@ -699,19 +680,6 @@ func getMonitorVolumes(config *core.ConfigMap, monitor *v1alpha1.TidbMonitor, tc
}
volumes = append(volumes, prometheusRules)

if dc != nil && dc.IsTLSClusterEnabled() {
defaultMode := int32(420)
tlsDMClient := core.Volume{
Name: util.DMClusterClientVolName,
VolumeSource: core.VolumeSource{
Secret: &core.SecretVolumeSource{
SecretName: util.DMClientTLSSecretName(dc.Name),
DefaultMode: &defaultMode,
},
},
}
volumes = append(volumes, tlsDMClient)
}
volumes = append(volumes, core.Volume{
Name: "prometheus-config-out",
VolumeSource: core.VolumeSource{
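
Caller-side shape after the signature change and the volume removal, as a rough sketch assembled from the rest of this diff (the surrounding wiring is assumed, not shown in the commit): DM cluster info now arrives precomputed, and DM client certificates are read from the shared TLS asset secret rather than a dedicated per-cluster volume.

// dmClusterInfos is built in syncTidbMonitorConfig from monitor.Spec.DM.Clusters.
cm, err := getMonitorConfigMap(monitor, monitorClusterInfos, dmClusterInfos)
if err != nil {
	return nil, err
}
// getMonitorVolumes no longer appends a per-DM-cluster client-TLS volume; the
// rendered config points at TLSAssetKey-style file names inside the shared
// asset mount instead.
volumes := getMonitorVolumes(cm, monitor, tc, dc)
_ = volumes
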
13 changes: 2 additions & 11 deletions pkg/monitor/monitor/util_test.go
@@ -82,7 +82,7 @@ func TestGetMonitorConfigMap(t *testing.T) {

for _, tt := range testCases {
t.Run(tt.name, func(t *testing.T) {
cm, err := getMonitorConfigMap(&tt.dmCluster, &tt.monitor, tt.monitorClusterInfos)
cm, err := getMonitorConfigMap(&tt.monitor, tt.monitorClusterInfos, nil)
g.Expect(err).NotTo(HaveOccurred())
if tt.expected == nil {
g.Expect(cm).To(BeNil())
@@ -857,15 +857,6 @@ func TestGetMonitorVolumes(t *testing.T) {
EmptyDir: &corev1.EmptyDirVolumeSource{},
},
},
{
Name: "dm-cluster-client-tls",
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: "foodm-dm-client-secret",
DefaultMode: pointer.Int32Ptr(420),
},
},
},
{
Name: "prometheus-config-out",
VolumeSource: corev1.VolumeSource{
@@ -889,7 +880,7 @@

for _, tt := range testCases {
t.Run(tt.name, func(t *testing.T) {
cm, err := getMonitorConfigMap(&tt.dmCluster, &tt.monitor, nil)
cm, err := getMonitorConfigMap(&tt.monitor, nil, nil)
g.Expect(err).NotTo(HaveOccurred())
sa := getMonitorVolumes(cm, &tt.monitor, &tt.cluster, &tt.dmCluster)
tt.expected(sa)
