From 0fd372db2aee70e3637836111f79b1226f710ee4 Mon Sep 17 00:00:00 2001 From: Ran Date: Mon, 16 Oct 2023 18:48:30 +0800 Subject: [PATCH] add collapsible topology files (#15210) --- config-templates/complex-cdc.yaml | 139 ------- config-templates/complex-mini.yaml | 112 ------ config-templates/complex-multi-instance.yaml | 150 ------- config-templates/complex-tidb-binlog.yaml | 175 --------- config-templates/complex-tiflash.yaml | 157 -------- config-templates/complex-tispark.yaml | 150 ------- config-templates/em-topology-config.yaml | 98 ----- .../geo-redundancy-deployment.yaml | 115 ------ config-templates/simple-cdc.yaml | 36 -- config-templates/simple-file-binlog.yaml | 47 --- config-templates/simple-mini.yaml | 30 -- config-templates/simple-multi-instance.yaml | 95 ----- config-templates/simple-tidb-binlog.yaml | 47 --- config-templates/simple-tiflash.yaml | 40 -- config-templates/simple-tispark.yaml | 45 --- geo-distributed-deployment-topology.md | 123 +++++- hybrid-deployment-topology.md | 263 ++++++++++++- minimal-deployment-topology.md | 158 +++++++- production-deployment-using-tiup.md | 14 +- ticdc-deployment-topology.md | 191 ++++++++- tidb-binlog-deployment-topology.md | 293 +++++++++++++- tiflash-deployment-topology.md | 369 +++++++++++++++++- tiflash/tiflash-configuration.md | 2 +- tispark-deployment-topology.md | 211 +++++++++- .../tiunimanager-install-and-maintain.md | 114 +++++- 25 files changed, 1711 insertions(+), 1463 deletions(-) delete mode 100644 config-templates/complex-cdc.yaml delete mode 100644 config-templates/complex-mini.yaml delete mode 100644 config-templates/complex-multi-instance.yaml delete mode 100644 config-templates/complex-tidb-binlog.yaml delete mode 100644 config-templates/complex-tiflash.yaml delete mode 100644 config-templates/complex-tispark.yaml delete mode 100644 config-templates/em-topology-config.yaml delete mode 100644 config-templates/geo-redundancy-deployment.yaml delete mode 100644 config-templates/simple-cdc.yaml delete mode 100644 config-templates/simple-file-binlog.yaml delete mode 100644 config-templates/simple-mini.yaml delete mode 100644 config-templates/simple-multi-instance.yaml delete mode 100644 config-templates/simple-tidb-binlog.yaml delete mode 100644 config-templates/simple-tiflash.yaml delete mode 100644 config-templates/simple-tispark.yaml diff --git a/config-templates/complex-cdc.yaml b/config-templates/complex-cdc.yaml deleted file mode 100644 index 77cebde5b6c8..000000000000 --- a/config-templates/complex-cdc.yaml +++ /dev/null @@ -1,139 +0,0 @@ -# # Global variables are applied to all deployments and used as the default value of -# # the deployments if a specific deployment value is missing. -global: - user: "tidb" - ssh_port: 22 - deploy_dir: "/tidb-deploy" - data_dir: "/tidb-data" - -# # Monitored variables are applied to all the machines. -monitored: - node_exporter_port: 9100 - blackbox_exporter_port: 9115 - # deploy_dir: "/tidb-deploy/monitored-9100" - # data_dir: "/tidb-data/monitored-9100" - # log_dir: "/tidb-deploy/monitored-9100/log" - -# # Server configs are used to specify the runtime configuration of TiDB components. 
-# # All configuration items can be found in TiDB docs: -# # - TiDB: https://docs.pingcap.com/zh/tidb/stable/tidb-configuration-file -# # - TiKV: https://docs.pingcap.com/zh/tidb/stable/tikv-configuration-file -# # - PD: https://docs.pingcap.com/zh/tidb/stable/pd-configuration-file -# # All configuration items use points to represent the hierarchy, e.g: -# # readpool.storage.use-unified-pool -# # -# # You can overwrite this configuration via the instance-level `config` field. - -server_configs: - tidb: - log.slow-threshold: 300 - tikv: - # server.grpc-concurrency: 4 - # raftstore.apply-pool-size: 2 - # raftstore.store-pool-size: 2 - # rocksdb.max-sub-compactions: 1 - # storage.block-cache.capacity: "16GB" - # readpool.unified.max-thread-count: 12 - readpool.storage.use-unified-pool: false - readpool.coprocessor.use-unified-pool: true - pd: - schedule.leader-schedule-limit: 4 - schedule.region-schedule-limit: 2048 - schedule.replica-schedule-limit: 64 - cdc: - # capture-session-ttl: 10 - # sorter.sort-dir: "/tmp/cdc_sort" - # gc-ttl: 86400 - -pd_servers: - - host: 10.0.1.4 - # ssh_port: 22 - # name: "pd-1" - # client_port: 2379 - # peer_port: 2380 - # deploy_dir: "/tidb-deploy/pd-2379" - # data_dir: "/tidb-data/pd-2379" - # log_dir: "/tidb-deploy/pd-2379/log" - # numa_node: "0,1" - # # The following configs are used to overwrite the `server_configs.pd` values. - # config: - # schedule.max-merge-region-size: 20 - # schedule.max-merge-region-keys: 200000 - - host: 10.0.1.5 - - host: 10.0.1.6 - -tidb_servers: - - host: 10.0.1.1 - # ssh_port: 22 - # port: 4000 - # status_port: 10080 - # deploy_dir: "/tidb-deploy/tidb-4000" - # log_dir: "/tidb-deploy/tidb-4000/log" - # numa_node: "0,1" - # # The following configs are used to overwrite the `server_configs.tidb` values. - # config: - # log.slow-query-file: tidb-slow-overwrited.log - - host: 10.0.1.2 - - host: 10.0.1.3 - -tikv_servers: - - host: 10.0.1.7 - # ssh_port: 22 - # port: 20160 - # status_port: 20180 - # deploy_dir: "/tidb-deploy/tikv-20160" - # data_dir: "/tidb-data/tikv-20160" - # log_dir: "/tidb-deploy/tikv-20160/log" - # numa_node: "0,1" - # # The following configs are used to overwrite the `server_configs.tikv` values. 
- # config: - # server.grpc-concurrency: 4 - # server.labels: { zone: "zone1", dc: "dc1", host: "host1" } - - - host: 10.0.1.8 - - host: 10.0.1.9 - -cdc_servers: - - host: 10.0.1.1 - port: 8300 - deploy_dir: "/tidb-deploy/cdc-8300" - data_dir: "/tidb-data/cdc-8300" - log_dir: "/tidb-deploy/cdc-8300/log" - # gc-ttl: 86400 - # ticdc_cluster_id: "cluster1" - - host: 10.0.1.2 - port: 8300 - deploy_dir: "/tidb-deploy/cdc-8300" - data_dir: "/tidb-data/cdc-8300" - log_dir: "/tidb-deploy/cdc-8300/log" - # gc-ttl: 86400 - # ticdc_cluster_id: "cluster1" - - host: 10.0.1.3 - port: 8300 - deploy_dir: "/tidb-deploy/cdc-8300" - data_dir: "/tidb-data/cdc-8300" - log_dir: "/tidb-deploy/cdc-8300/log" - # gc-ttl: 86400 - # ticdc_cluster_id: "cluster2" - -monitoring_servers: - - host: 10.0.1.10 - # ssh_port: 22 - # port: 9090 - # deploy_dir: "/tidb-deploy/prometheus-8249" - # data_dir: "/tidb-data/prometheus-8249" - # log_dir: "/tidb-deploy/prometheus-8249/log" - -grafana_servers: - - host: 10.0.1.10 - # port: 3000 - # deploy_dir: /tidb-deploy/grafana-3000 - -alertmanager_servers: - - host: 10.0.1.10 - # ssh_port: 22 - # web_port: 9093 - # cluster_port: 9094 - # deploy_dir: "/tidb-deploy/alertmanager-9093" - # data_dir: "/tidb-data/alertmanager-9093" - # log_dir: "/tidb-deploy/alertmanager-9093/log" diff --git a/config-templates/complex-mini.yaml b/config-templates/complex-mini.yaml deleted file mode 100644 index 916cd3b125d3..000000000000 --- a/config-templates/complex-mini.yaml +++ /dev/null @@ -1,112 +0,0 @@ -# # Global variables are applied to all deployments and used as the default value of -# # the deployments if a specific deployment value is missing. -global: - user: "tidb" - ssh_port: 22 - deploy_dir: "/tidb-deploy" - data_dir: "/tidb-data" - -# # Monitored variables are applied to all the machines. -monitored: - node_exporter_port: 9100 - blackbox_exporter_port: 9115 - # deploy_dir: "/tidb-deploy/monitored-9100" - # data_dir: "/tidb-data/monitored-9100" - # log_dir: "/tidb-deploy/monitored-9100/log" - -# # Server configs are used to specify the runtime configuration of TiDB components. -# # All configuration items can be found in TiDB docs: -# # - TiDB: https://docs.pingcap.com/zh/tidb/stable/tidb-configuration-file -# # - TiKV: https://docs.pingcap.com/zh/tidb/stable/tikv-configuration-file -# # - PD: https://docs.pingcap.com/zh/tidb/stable/pd-configuration-file -# # All configuration items use points to represent the hierarchy, e.g: -# # readpool.storage.use-unified-pool -# # -# # You can overwrite this configuration via the instance-level `config` field. - -server_configs: - tidb: - log.slow-threshold: 300 - binlog.enable: false - binlog.ignore-error: false - tikv: - # server.grpc-concurrency: 4 - # raftstore.apply-pool-size: 2 - # raftstore.store-pool-size: 2 - # rocksdb.max-sub-compactions: 1 - # storage.block-cache.capacity: "16GB" - # readpool.unified.max-thread-count: 12 - readpool.storage.use-unified-pool: false - readpool.coprocessor.use-unified-pool: true - pd: - schedule.leader-schedule-limit: 4 - schedule.region-schedule-limit: 2048 - schedule.replica-schedule-limit: 64 - -pd_servers: - - host: 10.0.1.4 - # ssh_port: 22 - # name: "pd-1" - # client_port: 2379 - # peer_port: 2380 - # deploy_dir: "/tidb-deploy/pd-2379" - # data_dir: "/tidb-data/pd-2379" - # log_dir: "/tidb-deploy/pd-2379/log" - # numa_node: "0,1" - # # The following configs are used to overwrite the `server_configs.pd` values. 
- # config: - # schedule.max-merge-region-size: 20 - # schedule.max-merge-region-keys: 200000 - - host: 10.0.1.5 - - host: 10.0.1.6 - -tidb_servers: - - host: 10.0.1.1 - # ssh_port: 22 - # port: 4000 - # status_port: 10080 - # deploy_dir: "/tidb-deploy/tidb-4000" - # log_dir: "/tidb-deploy/tidb-4000/log" - # numa_node: "0,1" - # # The following configs are used to overwrite the `server_configs.tidb` values. - # config: - # log.slow-query-file: tidb-slow-overwrited.log - - host: 10.0.1.2 - -tikv_servers: - - host: 10.0.1.7 - # ssh_port: 22 - # port: 20160 - # status_port: 20180 - # deploy_dir: "/tidb-deploy/tikv-20160" - # data_dir: "/tidb-data/tikv-20160" - # log_dir: "/tidb-deploy/tikv-20160/log" - # numa_node: "0,1" - # # The following configs are used to overwrite the `server_configs.tikv` values. - # config: - # server.grpc-concurrency: 4 - # server.labels: { zone: "zone1", dc: "dc1", host: "host1" } - - host: 10.0.1.8 - - host: 10.0.1.9 - -monitoring_servers: - - host: 10.0.1.10 - # ssh_port: 22 - # port: 9090 - # deploy_dir: "/tidb-deploy/prometheus-8249" - # data_dir: "/tidb-data/prometheus-8249" - # log_dir: "/tidb-deploy/prometheus-8249/log" - -grafana_servers: - - host: 10.0.1.10 - # port: 3000 - # deploy_dir: /tidb-deploy/grafana-3000 - -alertmanager_servers: - - host: 10.0.1.10 - # ssh_port: 22 - # web_port: 9093 - # cluster_port: 9094 - # deploy_dir: "/tidb-deploy/alertmanager-9093" - # data_dir: "/tidb-data/alertmanager-9093" - # log_dir: "/tidb-deploy/alertmanager-9093/log" \ No newline at end of file diff --git a/config-templates/complex-multi-instance.yaml b/config-templates/complex-multi-instance.yaml deleted file mode 100644 index 5464cfc16781..000000000000 --- a/config-templates/complex-multi-instance.yaml +++ /dev/null @@ -1,150 +0,0 @@ -# # Global variables are applied to all deployments and used as the default value of -# # the deployments if a specific deployment value is missing. 
-global: - user: "tidb" - ssh_port: 22 - deploy_dir: "/tidb-deploy" - data_dir: "/tidb-data" - -monitored: - node_exporter_port: 9100 - blackbox_exporter_port: 9115 - deploy_dir: "/tidb-deploy/monitored-9100" - data_dir: "/tidb-data/monitored-9100" - log_dir: "/tidb-deploy/monitored-9100/log" - -server_configs: - tidb: - log.slow-threshold: 300 - tikv: - readpool.unified.max-thread-count: - readpool.storage.use-unified-pool: false - readpool.coprocessor.use-unified-pool: true - storage.block-cache.capacity: "" - raftstore.capacity: "" - pd: - replication.location-labels: ["host"] - schedule.leader-schedule-limit: 4 - schedule.region-schedule-limit: 2048 - schedule.replica-schedule-limit: 64 - -pd_servers: - - host: 10.0.1.4 - - host: 10.0.1.5 - - host: 10.0.1.6 - -tidb_servers: - - host: 10.0.1.1 - port: 4000 - status_port: 10080 - deploy_dir: "/tidb-deploy/tidb-4000" - log_dir: "/tidb-deploy/tidb-4000/log" - numa_node: "0" - - host: 10.0.1.1 - port: 4001 - status_port: 10081 - deploy_dir: "/tidb-deploy/tidb-4001" - log_dir: "/tidb-deploy/tidb-4001/log" - numa_node: "1" - - host: 10.0.1.2 - port: 4000 - status_port: 10080 - deploy_dir: "/tidb-deploy/tidb-4000" - log_dir: "/tidb-deploy/tidb-4000/log" - numa_node: "0" - - host: 10.0.1.2 - port: 4001 - status_port: 10081 - deploy_dir: "/tidb-deploy/tidb-4001" - log_dir: "/tidb-deploy/tidb-4001/log" - numa_node: "1" - - host: 10.0.1.3 - port: 4000 - status_port: 10080 - deploy_dir: "/tidb-deploy/tidb-4000" - log_dir: "/tidb-deploy/tidb-4000/log" - numa_node: "0" - - host: 10.0.1.3 - port: 4001 - status_port: 10081 - deploy_dir: "/tidb-deploy/tidb-4001" - log_dir: "/tidb-deploy/tidb-4001/log" - numa_node: "1" - -tikv_servers: - - host: 10.0.1.7 - port: 20160 - status_port: 20180 - deploy_dir: "/tidb-deploy/tikv-20160" - data_dir: "/tidb-data/tikv-20160" - log_dir: "/tidb-deploy/tikv-20160/log" - numa_node: "0" - config: - server.labels: { host: "tikv1" } - - host: 10.0.1.7 - port: 20161 - status_port: 20181 - deploy_dir: "/tidb-deploy/tikv-20161" - data_dir: "/tidb-data/tikv-20161" - log_dir: "/tidb-deploy/tikv-20161/log" - numa_node: "1" - config: - server.labels: { host: "tikv1" } - - host: 10.0.1.8 - port: 20160 - status_port: 20180 - deploy_dir: "/tidb-deploy/tikv-20160" - data_dir: "/tidb-data/tikv-20160" - log_dir: "/tidb-deploy/tikv-20160/log" - numa_node: "0" - config: - server.labels: { host: "tikv2" } - - host: 10.0.1.8 - port: 20161 - status_port: 20181 - deploy_dir: "/tidb-deploy/tikv-20161" - data_dir: "/tidb-data/tikv-20161" - log_dir: "/tidb-deploy/tikv-20161/log" - numa_node: "1" - config: - server.labels: { host: "tikv2" } - - host: 10.0.1.9 - port: 20160 - status_port: 20180 - deploy_dir: "/tidb-deploy/tikv-20160" - data_dir: "/tidb-data/tikv-20160" - log_dir: "/tidb-deploy/tikv-20160/log" - numa_node: "0" - config: - server.labels: { host: "tikv3" } - - host: 10.0.1.9 - port: 20161 - status_port: 20181 - deploy_dir: "/tidb-deploy/tikv-20161" - data_dir: "/tidb-data/tikv-20161" - log_dir: "/tidb-deploy/tikv-20161/log" - numa_node: "1" - config: - server.labels: { host: "tikv3" } - -monitoring_servers: - - host: 10.0.1.10 - # ssh_port: 22 - # port: 9090 - # deploy_dir: "/tidb-deploy/prometheus-8249" - # data_dir: "/tidb-data/prometheus-8249" - # log_dir: "/tidb-deploy/prometheus-8249/log" - -grafana_servers: - - host: 10.0.1.10 - # port: 3000 - # deploy_dir: /tidb-deploy/grafana-3000 - -alertmanager_servers: - - host: 10.0.1.10 - # ssh_port: 22 - # web_port: 9093 - # cluster_port: 9094 - # deploy_dir: 
"/tidb-deploy/alertmanager-9093" - # data_dir: "/tidb-data/alertmanager-9093" - # log_dir: "/tidb-deploy/alertmanager-9093/log" \ No newline at end of file diff --git a/config-templates/complex-tidb-binlog.yaml b/config-templates/complex-tidb-binlog.yaml deleted file mode 100644 index 12131675e253..000000000000 --- a/config-templates/complex-tidb-binlog.yaml +++ /dev/null @@ -1,175 +0,0 @@ -# # Global variables are applied to all deployments and used as the default value of -# # the deployments if a specific deployment value is missing. -global: - user: "tidb" - ssh_port: 22 - deploy_dir: "/tidb-deploy" - data_dir: "/tidb-data" - -# # Monitored variables are applied to all the machines. -monitored: - node_exporter_port: 9100 - blackbox_exporter_port: 9115 - # deploy_dir: "/tidb-deploy/monitored-9100" - # data_dir: "/tidb-data/monitored-9100" - # log_dir: "/tidb-deploy/monitored-9100/log" - -# # Server configs are used to specify the runtime configuration of TiDB components. -# # All configuration items can be found in TiDB docs: -# # - TiDB: https://docs.pingcap.com/zh/tidb/stable/tidb-configuration-file -# # - TiKV: https://docs.pingcap.com/zh/tidb/stable/tikv-configuration-file -# # - PD: https://docs.pingcap.com/zh/tidb/stable/pd-configuration-file -# # All configuration items use points to represent the hierarchy, e.g: -# # readpool.storage.use-unified-pool -# # -# # You can overwrite this configuration via the instance-level `config` field. - -server_configs: - tidb: - log.slow-threshold: 300 - binlog.enable: true - binlog.ignore-error: true - tikv: - # server.grpc-concurrency: 4 - # raftstore.apply-pool-size: 2 - # raftstore.store-pool-size: 2 - # rocksdb.max-sub-compactions: 1 - # storage.block-cache.capacity: "16GB" - # readpool.unified.max-thread-count: 12 - readpool.storage.use-unified-pool: false - readpool.coprocessor.use-unified-pool: true - pd: - schedule.leader-schedule-limit: 4 - schedule.region-schedule-limit: 2048 - schedule.replica-schedule-limit: 64 - -pd_servers: - - host: 10.0.1.4 - # ssh_port: 22 - # name: "pd-1" - # client_port: 2379 - # peer_port: 2380 - # deploy_dir: "/tidb-deploy/pd-2379" - # data_dir: "/tidb-data/pd-2379" - # log_dir: "/tidb-deploy/pd-2379/log" - # numa_node: "0,1" - # # The following configs are used to overwrite the `server_configs.pd` values. - # config: - # schedule.max-merge-region-size: 20 - # schedule.max-merge-region-keys: 200000 - - host: 10.0.1.5 - - host: 10.0.1.6 -tidb_servers: - - host: 10.0.1.1 - # ssh_port: 22 - # port: 4000 - # status_port: 10080 - # deploy_dir: "/tidb-deploy/tidb-4000" - # log_dir: "/tidb-deploy/tidb-4000/log" - # numa_node: "0,1" - # # The following configs are used to overwrite the `server_configs.tidb` values. - # config: - # log.slow-query-file: tidb-slow-overwrited.log - - host: 10.0.1.2 - - host: 10.0.1.3 -tikv_servers: - - host: 10.0.1.7 - # ssh_port: 22 - # port: 20160 - # status_port: 20180 - # deploy_dir: "/tidb-deploy/tikv-20160" - # data_dir: "/tidb-data/tikv-20160" - # log_dir: "/tidb-deploy/tikv-20160/log" - # numa_node: "0,1" - # # The following configs are used to overwrite the `server_configs.tikv` values. - # config: - # server.grpc-concurrency: 4 - # server.labels: { zone: "zone1", dc: "dc1", host: "host1" } - - host: 10.0.1.8 - - host: 10.0.1.9 - -pump_servers: - - host: 10.0.1.1 - ssh_port: 22 - port: 8250 - deploy_dir: "/tidb-deploy/pump-8250" - data_dir: "/tidb-data/pump-8250" - # The following configs are used to overwrite the `server_configs.pump` values. 
- config: - gc: 7 - - host: 10.0.1.2 - ssh_port: 22 - port: 8250 - deploy_dir: "/tidb-deploy/pump-8250" - data_dir: "/tidb-data/pump-8250" - # The following configs are used to overwrite the `server_configs.pump` values. - config: - gc: 7 - - host: 10.0.1.3 - ssh_port: 22 - port: 8250 - deploy_dir: "/tidb-deploy/pump-8250" - data_dir: "/tidb-data/pump-8250" - # The following configs are used to overwrite the `server_configs.pump` values. - config: - gc: 7 -drainer_servers: - - host: 10.0.1.12 - port: 8249 - deploy_dir: "/tidb-deploy/drainer-8249" - data_dir: "/tidb-data/drainer-8249" - # If drainer doesn't have a checkpoint, use initial commitTS as the initial checkpoint. - # Will get a latest timestamp from pd if commit_ts is set to -1 (the default value). - commit_ts: -1 - # The following configs are used to overwrite the `server_configs.drainer` values. - config: - syncer.db-type: "tidb" - syncer.to.host: "10.0.1.12" - syncer.to.user: "root" - syncer.to.password: "" - syncer.to.port: 4000 - syncer.to.checkpoint: - schema: "tidb_binlog" - type: "tidb" - host: "10.0.1.14" - user: "root" - password: "123" - port: 4000 - - host: 10.0.1.13 - port: 8249 - deploy_dir: "/tidb-deploy/drainer-8249" - data_dir: "/tidb-data/drainer-8249" - # If Drainer does not have a checkpoint, use the initial commitTS as the initial checkpoint. - # If commit_ts is set to -1 (the default value), you will get a latest timestamp from PD. - commit_ts: -1 - # The following configurations are used to overwrite the `server_configs.drainer` values. - config: - syncer.db-type: "kafka" - syncer.replicate-do-db: - - db1 - - db2 - syncer.to.kafka-addrs: "10.0.1.20:9092,10.0.1.21:9092,10.0.1.22:9092" - syncer.to.kafka-version: "2.4.0" - syncer.to.topic-name: "asyouwish" - -monitoring_servers: - - host: 10.0.1.10 - # ssh_port: 22 - # port: 9090 - # deploy_dir: "/tidb-deploy/prometheus-8249" - # data_dir: "/tidb-data/prometheus-8249" - # log_dir: "/tidb-deploy/prometheus-8249/log" - -grafana_servers: - - host: 10.0.1.10 - # port: 3000 - # deploy_dir: /tidb-deploy/grafana-3000 - -alertmanager_servers: - - host: 10.0.1.10 - # ssh_port: 22 - # web_port: 9093 - # cluster_port: 9094 - # deploy_dir: "/tidb-deploy/alertmanager-9093" - # data_dir: "/tidb-data/alertmanager-9093" - # log_dir: "/tidb-deploy/alertmanager-9093/log" diff --git a/config-templates/complex-tiflash.yaml b/config-templates/complex-tiflash.yaml deleted file mode 100644 index 91af039c1806..000000000000 --- a/config-templates/complex-tiflash.yaml +++ /dev/null @@ -1,157 +0,0 @@ -# # Global variables are applied to all deployments and used as the default value of -# # the deployments if a specific deployment value is missing. -global: - user: "tidb" - ssh_port: 22 - deploy_dir: "/tidb-deploy" - data_dir: "/tidb-data" - -# # Monitored variables are applied to all the machines. -monitored: - node_exporter_port: 9100 - blackbox_exporter_port: 9115 - # deploy_dir: "/tidb-deploy/monitored-9100" - # data_dir: "/tidb-data/monitored-9100" - # log_dir: "/tidb-deploy/monitored-9100/log" - -# # Server configs are used to specify the runtime configuration of TiDB components. 
-# # All configuration items can be found in TiDB docs: -# # - TiDB: https://docs.pingcap.com/zh/tidb/stable/tidb-configuration-file -# # - TiKV: https://docs.pingcap.com/zh/tidb/stable/tikv-configuration-file -# # - PD: https://docs.pingcap.com/zh/tidb/stable/pd-configuration-file -# # All configuration items use points to represent the hierarchy, e.g: -# # readpool.storage.use-unified-pool -# # -# # You can overwrite this configuration via the instance-level `config` field. - -server_configs: - tidb: - log.slow-threshold: 300 - tikv: - # server.grpc-concurrency: 4 - # raftstore.apply-pool-size: 2 - # raftstore.store-pool-size: 2 - # rocksdb.max-sub-compactions: 1 - # storage.block-cache.capacity: "16GB" - # readpool.unified.max-thread-count: 12 - readpool.storage.use-unified-pool: false - readpool.coprocessor.use-unified-pool: true - pd: - schedule.leader-schedule-limit: 4 - schedule.region-schedule-limit: 2048 - schedule.replica-schedule-limit: 64 - replication.enable-placement-rules: true - tiflash: - # Maximum memory usage for processing a single query. Zero means unlimited. - profiles.default.max_memory_usage: 0 - # Maximum memory usage for processing all concurrently running queries on the server. Zero means unlimited. - profiles.default.max_memory_usage_for_all_queries: 0 - tiflash-learner: - # The allowable number of threads in the pool that flushes Raft data to storage. - raftstore.apply-pool-size: 4 - # The allowable number of threads that process Raft, which is the size of the Raftstore thread pool. - raftstore.store-pool-size: 4 -pd_servers: - - host: 10.0.1.4 - # ssh_port: 22 - # name: "pd-1" - # client_port: 2379 - # peer_port: 2380 - # deploy_dir: "/tidb-deploy/pd-2379" - # data_dir: "/tidb-data/pd-2379" - # log_dir: "/tidb-deploy/pd-2379/log" - # numa_node: "0,1" - # # The following configs are used to overwrite the `server_configs.pd` values. - # config: - # schedule.max-merge-region-size: 20 - # schedule.max-merge-region-keys: 200000 - - host: 10.0.1.5 - - host: 10.0.1.6 -tidb_servers: - - host: 10.0.1.7 - # ssh_port: 22 - # port: 4000 - # status_port: 10080 - # deploy_dir: "/tidb-deploy/tidb-4000" - # log_dir: "/tidb-deploy/tidb-4000/log" - # numa_node: "0,1" - # # The following configs are used to overwrite the `server_configs.tidb` values. - # config: - # log.slow-query-file: tidb-slow-overwrited.log - - host: 10.0.1.8 - - host: 10.0.1.9 -tikv_servers: - - host: 10.0.1.1 - # ssh_port: 22 - # port: 20160 - # status_port: 20180 - # deploy_dir: "/tidb-deploy/tikv-20160" - # data_dir: "/tidb-data/tikv-20160" - # log_dir: "/tidb-deploy/tikv-20160/log" - # numa_node: "0,1" - # # The following configs are used to overwrite the `server_configs.tikv` values. - # config: - # server.grpc-concurrency: 4 - # server.labels: - # zone: "zone1" - # dc: "dc1" - # host: "host1" - - host: 10.0.1.2 - - host: 10.0.1.3 - -tiflash_servers: - - host: 10.0.1.11 - # ssh_port: 22 - # tcp_port: 9000 - # flash_service_port: 3930 - # flash_proxy_port: 20170 - # flash_proxy_status_port: 20292 - # metrics_port: 8234 - # deploy_dir: "/tidb-deploy/tiflash-9000" - ## The `data_dir` will be overwritten if you define `storage.main.dir` configurations in the `config` section. - # data_dir: "/tidb-data/tiflash-9000" - # log_dir: "/tidb-deploy/tiflash-9000/log" - # numa_node: "0,1" - # # The following configs are used to overwrite the `server_configs.tiflash` values. 
- # config: - # logger.level: "info" - # ## Multi-disk deployment introduced in v4.0.9 - # ## Check https://docs.pingcap.com/tidb/stable/tiflash-configuration#multi-disk-deployment for more details. - # ## Example1: - # # storage.main.dir: [ "/nvme_ssd0_512/tiflash", "/nvme_ssd1_512/tiflash" ] - # # storage.main.capacity: [ 536870912000, 536870912000 ] - # ## Example2: - # # storage.main.dir: [ "/sata_ssd0_512/tiflash", "/sata_ssd1_512/tiflash", "/sata_ssd2_512/tiflash" ] - # # storage.latest.dir: [ "/nvme_ssd0_150/tiflash" ] - # # storage.main.capacity: [ 536870912000, 536870912000, 536870912000 ] - # # storage.latest.capacity: [ 161061273600 ] - # learner_config: - # log-level: "info" - # server.labels: - # zone: "zone2" - # dc: "dc2" - # host: "host2" - # - host: 10.0.1.12 - # - host: 10.0.1.13 - -monitoring_servers: - - host: 10.0.1.10 - # ssh_port: 22 - # port: 9090 - # deploy_dir: "/tidb-deploy/prometheus-8249" - # data_dir: "/tidb-data/prometheus-8249" - # log_dir: "/tidb-deploy/prometheus-8249/log" - -grafana_servers: - - host: 10.0.1.10 - # port: 3000 - # deploy_dir: /tidb-deploy/grafana-3000 - -alertmanager_servers: - - host: 10.0.1.10 - # ssh_port: 22 - # web_port: 9093 - # cluster_port: 9094 - # deploy_dir: "/tidb-deploy/alertmanager-9093" - # data_dir: "/tidb-data/alertmanager-9093" - # log_dir: "/tidb-deploy/alertmanager-9093/log" diff --git a/config-templates/complex-tispark.yaml b/config-templates/complex-tispark.yaml deleted file mode 100644 index 8e1899788d9e..000000000000 --- a/config-templates/complex-tispark.yaml +++ /dev/null @@ -1,150 +0,0 @@ -# # Global variables are applied to all deployments and used as the default value of -# # the deployments if a specific deployment value is missing. -global: - user: "tidb" - ssh_port: 22 - deploy_dir: "/tidb-deploy" - data_dir: "/tidb-data" - -# # Monitored variables are applied to all the machines. -monitored: - node_exporter_port: 9100 - blackbox_exporter_port: 9115 - # deploy_dir: "/tidb-deploy/monitored-9100" - # data_dir: "/tidb-data/monitored-9100" - # log_dir: "/tidb-deploy/monitored-9100/log" - -# # Server configs are used to specify the runtime configuration of TiDB components. -# # All configuration items can be found in TiDB docs: -# # - TiDB: https://docs.pingcap.com/zh/tidb/stable/tidb-configuration-file -# # - TiKV: https://docs.pingcap.com/zh/tidb/stable/tikv-configuration-file -# # - PD: https://docs.pingcap.com/zh/tidb/stable/pd-configuration-file -# # All configuration items use points to represent the hierarchy, e.g: -# # readpool.storage.use-unified-pool -# # -# # You can overwrite this configuration via the instance-level `config` field. - -server_configs: - tidb: - log.slow-threshold: 300 - tikv: - # server.grpc-concurrency: 4 - # raftstore.apply-pool-size: 2 - # raftstore.store-pool-size: 2 - # rocksdb.max-sub-compactions: 1 - # storage.block-cache.capacity: "16GB" - # readpool.unified.max-thread-count: 12 - readpool.storage.use-unified-pool: false - readpool.coprocessor.use-unified-pool: true - pd: - schedule.leader-schedule-limit: 4 - schedule.region-schedule-limit: 2048 - schedule.replica-schedule-limit: 64 - -pd_servers: - - host: 10.0.1.4 - # ssh_port: 22 - # name: "pd-1" - # client_port: 2379 - # peer_port: 2380 - # deploy_dir: "/tidb-deploy/pd-2379" - # data_dir: "/tidb-data/pd-2379" - # log_dir: "/tidb-deploy/pd-2379/log" - # numa_node: "0,1" - # # The following configs are used to overwrite the `server_configs.pd` values. 
- # config: - # schedule.max-merge-region-size: 20 - # schedule.max-merge-region-keys: 200000 - - host: 10.0.1.5 - - host: 10.0.1.6 - -tidb_servers: - - host: 10.0.1.1 - # ssh_port: 22 - # port: 4000 - # status_port: 10080 - # deploy_dir: "/tidb-deploy/tidb-4000" - # log_dir: "/tidb-deploy/tidb-4000/log" - # numa_node: "0,1" - # # The following configs are used to overwrite the `server_configs.tidb` values. - # config: - # log.slow-query-file: tidb-slow-overwrited.log - - host: 10.0.1.2 - - host: 10.0.1.3 - -tikv_servers: - - host: 10.0.1.7 - # ssh_port: 22 - # port: 20160 - # status_port: 20180 - # deploy_dir: "/tidb-deploy/tikv-20160" - # data_dir: "/tidb-data/tikv-20160" - # log_dir: "/tidb-deploy/tikv-20160/log" - # numa_node: "0,1" - # # The following configs are used to overwrite the `server_configs.tikv` values. - # config: - # server.grpc-concurrency: 4 - # server.labels: { zone: "zone1", dc: "dc1", host: "host1" } - - - host: 10.0.1.8 - - host: 10.0.1.9 - -# NOTE: TiSpark support is an experimental feature, it's not recommend to be used in -# production at present. -# To use TiSpark, you need to manually install Java Runtime Environment (JRE) 8 on the -# host, see the OpenJDK doc for a reference: https://openjdk.java.net/install/ -# If you have already installed JRE 1.8 at a location other than the default of system's -# package management system, you may use the "java_home" field to set the JAVA_HOME variable. -# NOTE: Only 1 master node is supported for now -tispark_masters: - - host: 10.0.1.21 - # ssh_port: 22 - # port: 7077 - # web_port: 8080 - # deploy_dir: "/tidb-deploy/tispark-master-7077" - # java_home: "/usr/local/bin/java-1.8.0" - # spark_config: - # spark.driver.memory: "2g" - # spark.eventLog.enabled: "False" - # spark.tispark.grpc.framesize: 268435456 - # spark.tispark.grpc.timeout_in_sec: 100 - # spark.tispark.meta.reload_period_in_sec: 60 - # spark.tispark.request.command.priority: "Low" - # spark.tispark.table.scan_concurrency: 256 - # spark_env: - # SPARK_EXECUTOR_CORES: 5 - # SPARK_EXECUTOR_MEMORY: "10g" - # SPARK_WORKER_CORES: 5 - # SPARK_WORKER_MEMORY: "10g" - -# NOTE: multiple worker nodes on the same host is not supported by Spark -tispark_workers: - - host: 10.0.1.22 - # ssh_port: 22 - # port: 7078 - # web_port: 8081 - # deploy_dir: "/tidb-deploy/tispark-worker-7078" - # java_home: "/usr/local/bin/java-1.8.0" - - host: 10.0.1.23 - -monitoring_servers: - - host: 10.0.1.10 - # ssh_port: 22 - # port: 9090 - # deploy_dir: "/tidb-deploy/prometheus-8249" - # data_dir: "/tidb-data/prometheus-8249" - # log_dir: "/tidb-deploy/prometheus-8249/log" - -grafana_servers: - - host: 10.0.1.10 - # port: 3000 - # deploy_dir: /tidb-deploy/grafana-3000 - -alertmanager_servers: - - host: 10.0.1.10 - # ssh_port: 22 - # web_port: 9093 - # cluster_port: 9094 - # deploy_dir: "/tidb-deploy/alertmanager-9093" - # data_dir: "/tidb-data/alertmanager-9093" - # log_dir: "/tidb-deploy/alertmanager-9093/log" diff --git a/config-templates/em-topology-config.yaml b/config-templates/em-topology-config.yaml deleted file mode 100644 index cfb94bbe0f55..000000000000 --- a/config-templates/em-topology-config.yaml +++ /dev/null @@ -1,98 +0,0 @@ -global: - user: "tidb" - group: "tidb" - ssh_port: 22 - deploy_dir: "/em-deploy" - data_dir: "/em-data" - arch: "amd64" - log_level: "info" - # specify the external elasticsearch address. 
default is empty - # and elasticsearch_servers can only have one value - external_elasticsearch_url: "" - login_host_user: "" - login_private_key_path: "" - login_public_key_path: "" - -monitored: - node_exporter_port: 4124 - -em_cluster_servers: - - host: {ip} - # port: 4101 - # metrics_port: 4104 - # registry_client_port: 4106 - # registry_peer_port: 4107 - # deploy_dir: "/em-deploy/cluster-server-4101" - # data_dir: "/em-data/cluster-server-41101" - -em_api_servers: - - host: {ip} - # port: 4100 - # metrics_port: 4103 - # deploy_dir: "/em-deploy/openapi-server-4100" - # data_dir: "/em-data/openapi-server-4100" - -em_web_servers: - - host: {ip} - # port: 4180 - # deploy_dir: "/em-deploy/em-web-4180" - # data_dir: "/em-data/em-web-4180" - -em_file_servers: - - host: {ip} - # port: 4102 - # metrics_port: 4105 - # deploy_dir: "/em-deploy/file-server-4102" - # data_dir: "/em-data/file-server-4102" - -elasticsearch_servers: - - host: {ip} - # port: 4108 - # heap_size: "4g" - # deploy_dir: "/em-deploy/elasticsearch-4108" - # data_dir: "/em-data/elasticsearch-4108" - -tracer_servers: - - host: {ip} - # port: 4114 - # web_port: 4115 - # zipkin_thrift_port: 4116 - # compact_thrift_port: 4117 - # binary_thrift_port: 4118 - # agent_serve_port: 4119 - # jaeger_thrift_port: 4120 - # jaeger_host_port: 4121 - # collector_port: 4122 - # grpc_serve_port: 4123 - # deploy_dir: "/em-deploy/tracer-4114" - # data_dir: "/em-data/tracer-4114" - -kibana_servers: - - host: {ip} - # port: 4109 - # deploy_dir: "/em-deploy/kibana-4109" - # data_dir: "/em-data/kibana-4109" - -monitoring_servers: - - host: {ip} - # port: 4110 - # deploy_dir: "/em-deploy/prometheus-4110" - # data_dir: "/em-data/prometheus-4110" - -alertmanager_servers: - - host: {ip} - # web_port: 4112 - # cluster_port: 4113 - # deploy_dir: "/em-deploy/alertmanager-4112" - # data_dir: "/em-data/alertmanager-4112" - -grafana_servers: - - host: {ip} - # port: 4111 - # deploy_dir: "/em-deploy/grafana-4111" - # data_dir: "/em-data/grafana-4111" - -filebeat_servers: - - host: {ip} - # deploy_dir: "/em-deploy/filebeat-0" - # data_dir: "/em-data/filebeat-0" diff --git a/config-templates/geo-redundancy-deployment.yaml b/config-templates/geo-redundancy-deployment.yaml deleted file mode 100644 index 888ddd6a99f7..000000000000 --- a/config-templates/geo-redundancy-deployment.yaml +++ /dev/null @@ -1,115 +0,0 @@ -# Tip: PD priority needs to be manually set using the PD-ctl client tool. such as, member Leader_priority PD-name numbers. -# Global variables are applied to all deployments and used as the default value of -# the deployments if a specific deployment value is missing. 
-# -# Abbreviations used in this example: -# sh: Shanghai Zone -# bj: Beijing Zone -# sha: Shanghai Datacenter A -# bja: Beijing Datacenter A -# bjb: Beijing Datacenter B - -global: - user: "tidb" - ssh_port: 22 - deploy_dir: "/tidb-deploy" - data_dir: "/tidb-data" -monitored: - node_exporter_port: 9100 - blackbox_exporter_port: 9115 - deploy_dir: "/tidb-deploy/monitored-9100" -server_configs: - tidb: - log.level: debug - log.slow-query-file: tidb-slow.log - tikv: - server.grpc-compression-type: gzip - readpool.storage.use-unified-pool: true - readpool.storage.low-concurrency: 8 - pd: - replication.location-labels: ["zone","dc","rack","host"] - replication.max-replicas: 5 - label-property: # TiDB 5.2 及以上版本默认不支持 label-property 配置。若要设置副本策略,请使用 Placement Rules。 - reject-leader: - - key: "dc" - value: "sha" -pd_servers: - - host: 10.0.1.6 - - host: 10.0.1.7 - - host: 10.0.1.8 - - host: 10.0.1.9 - - host: 10.0.1.10 -tidb_servers: - - host: 10.0.1.1 - - host: 10.0.1.2 - - host: 10.0.1.3 - - host: 10.0.1.4 - - host: 10.0.1.5 -tikv_servers: - - host: 10.0.1.11 - ssh_port: 22 - port: 20160 - status_port: 20180 - deploy_dir: "/tidb-deploy/tikv-20160" - data_dir: "/tidb-data/tikv-20160" - config: - server.labels: - zone: bj - dc: bja - rack: rack1 - host: host1 - - host: 10.0.1.12 - ssh_port: 22 - port: 20161 - status_port: 20181 - deploy_dir: "/tidb-deploy/tikv-20161" - data_dir: "/tidb-data/tikv-20161" - config: - server.labels: - zone: bj - dc: bja - rack: rack1 - host: host2 - - host: 10.0.1.13 - ssh_port: 22 - port: 20160 - status_port: 20180 - deploy_dir: "/tidb-deploy/tikv-20160" - data_dir: "/tidb-data/tikv-20160" - config: - server.labels: - zone: bj - dc: bjb - rack: rack1 - host: host1 - - host: 10.0.1.14 - ssh_port: 22 - port: 20161 - status_port: 20181 - deploy_dir: "/tidb-deploy/tikv-20161" - data_dir: "/tidb-data/tikv-20161" - config: - server.labels: - zone: bj - dc: bjb - rack: rack1 - host: host2 - - host: 10.0.1.15 - ssh_port: 22 - port: 20160 - deploy_dir: "/tidb-deploy/tikv-20160" - data_dir: "/tidb-data/tikv-20160" - config: - server.labels: - zone: sh - dc: sha - rack: rack1 - host: host1 - readpool.storage.use-unified-pool: true - readpool.storage.low-concurrency: 10 - raftstore.raft-min-election-timeout-ticks: 1000 - raftstore.raft-max-election-timeout-ticks: 1020 -monitoring_servers: - - host: 10.0.1.16 -grafana_servers: - - host: 10.0.1.16 \ No newline at end of file diff --git a/config-templates/simple-cdc.yaml b/config-templates/simple-cdc.yaml deleted file mode 100644 index 4d820082de20..000000000000 --- a/config-templates/simple-cdc.yaml +++ /dev/null @@ -1,36 +0,0 @@ -# # Global variables are applied to all deployments and used as the default value of -# # the deployments if a specific deployment value is missing. 
-global: - user: "tidb" - ssh_port: 22 - deploy_dir: "/tidb-deploy" - data_dir: "/tidb-data" - -pd_servers: - - host: 10.0.1.4 - - host: 10.0.1.5 - - host: 10.0.1.6 - -tidb_servers: - - host: 10.0.1.1 - - host: 10.0.1.2 - - host: 10.0.1.3 - -tikv_servers: - - host: 10.0.1.7 - - host: 10.0.1.8 - - host: 10.0.1.9 - -cdc_servers: - - host: 10.0.1.7 - - host: 10.0.1.8 - - host: 10.0.1.9 - -monitoring_servers: - - host: 10.0.1.10 - -grafana_servers: - - host: 10.0.1.10 - -alertmanager_servers: - - host: 10.0.1.10 \ No newline at end of file diff --git a/config-templates/simple-file-binlog.yaml b/config-templates/simple-file-binlog.yaml deleted file mode 100644 index 18896911d724..000000000000 --- a/config-templates/simple-file-binlog.yaml +++ /dev/null @@ -1,47 +0,0 @@ -# # Global variables are applied to all deployments and used as the default value of -# # the deployments if a specific deployment value is missing. -global: - user: "tidb" - ssh_port: 22 - deploy_dir: "/tidb-deploy" - data_dir: "/tidb-data" - -server_configs: - tidb: - binlog.enable: true - binlog.ignore-error: true - -pd_servers: - - host: 10.0.1.4 - - host: 10.0.1.5 - - host: 10.0.1.6 -tidb_servers: - - host: 10.0.1.1 - - host: 10.0.1.2 - - host: 10.0.1.3 -tikv_servers: - - host: 10.0.1.7 - - host: 10.0.1.8 - - host: 10.0.1.9 - -pump_servers: - - host: 10.0.1.1 - - host: 10.0.1.2 - - host: 10.0.1.3 -drainer_servers: - - host: 10.0.1.12 - # drainer meta data directory path - data_dir: "/path/to/save/data" - config: - syncer.db-type: "file" - # directory to save binlog file, default same as data-dir(save checkpoint file) if this is not configured. - # syncer.to.dir: "/path/to/save/binlog" - -monitoring_servers: - - host: 10.0.1.10 - -grafana_servers: - - host: 10.0.1.10 - -alertmanager_servers: - - host: 10.0.1.10 diff --git a/config-templates/simple-mini.yaml b/config-templates/simple-mini.yaml deleted file mode 100644 index df7a90653c68..000000000000 --- a/config-templates/simple-mini.yaml +++ /dev/null @@ -1,30 +0,0 @@ -# # Global variables are applied to all deployments and used as the default value of -# # the deployments if a specific deployment value is missing. -global: - user: "tidb" - ssh_port: 22 - deploy_dir: "/tidb-deploy" - data_dir: "/tidb-data" - -pd_servers: - - host: 10.0.1.4 - - host: 10.0.1.5 - - host: 10.0.1.6 - -tidb_servers: - - host: 10.0.1.1 - - host: 10.0.1.2 - -tikv_servers: - - host: 10.0.1.7 - - host: 10.0.1.8 - - host: 10.0.1.9 - -monitoring_servers: - - host: 10.0.1.10 - -grafana_servers: - - host: 10.0.1.10 - -alertmanager_servers: - - host: 10.0.1.10 \ No newline at end of file diff --git a/config-templates/simple-multi-instance.yaml b/config-templates/simple-multi-instance.yaml deleted file mode 100644 index 735b3be1af61..000000000000 --- a/config-templates/simple-multi-instance.yaml +++ /dev/null @@ -1,95 +0,0 @@ -# # Global variables are applied to all deployments and used as the default value of -# # the deployments if a specific deployment value is missing. 
-global: - user: "tidb" - ssh_port: 22 - deploy_dir: "/tidb-deploy" - data_dir: "/tidb-data" - -server_configs: - tikv: - readpool.unified.max-thread-count: - readpool.storage.use-unified-pool: false - readpool.coprocessor.use-unified-pool: true - storage.block-cache.capacity: "" - raftstore.capacity: "" - pd: - replication.location-labels: ["host"] - -pd_servers: - - host: 10.0.1.4 - - host: 10.0.1.5 - - host: 10.0.1.6 - -tidb_servers: - - host: 10.0.1.1 - port: 4000 - status_port: 10080 - numa_node: "0" - - host: 10.0.1.1 - port: 4001 - status_port: 10081 - numa_node: "1" - - host: 10.0.1.2 - port: 4000 - status_port: 10080 - numa_node: "0" - - host: 10.0.1.2 - port: 4001 - status_port: 10081 - numa_node: "1" - - host: 10.0.1.3 - port: 4000 - status_port: 10080 - numa_node: "0" - - host: 10.0.1.3 - port: 4001 - status_port: 10081 - numa_node: "1" - -tikv_servers: - - host: 10.0.1.7 - port: 20160 - status_port: 20180 - numa_node: "0" - config: - server.labels: { host: "tikv1" } - - host: 10.0.1.7 - port: 20161 - status_port: 20181 - numa_node: "1" - config: - server.labels: { host: "tikv1" } - - host: 10.0.1.8 - port: 20160 - status_port: 20180 - numa_node: "0" - config: - server.labels: { host: "tikv2" } - - host: 10.0.1.8 - port: 20161 - status_port: 20181 - numa_node: "1" - config: - server.labels: { host: "tikv2" } - - host: 10.0.1.9 - port: 20160 - status_port: 20180 - numa_node: "0" - config: - server.labels: { host: "tikv3" } - - host: 10.0.1.9 - port: 20161 - status_port: 20181 - numa_node: "1" - config: - server.labels: { host: "tikv3" } - -monitoring_servers: - - host: 10.0.1.10 - -grafana_servers: - - host: 10.0.1.10 - -alertmanager_servers: - - host: 10.0.1.10 \ No newline at end of file diff --git a/config-templates/simple-tidb-binlog.yaml b/config-templates/simple-tidb-binlog.yaml deleted file mode 100644 index 558023e00356..000000000000 --- a/config-templates/simple-tidb-binlog.yaml +++ /dev/null @@ -1,47 +0,0 @@ -# # Global variables are applied to all deployments and used as the default value of -# # the deployments if a specific deployment value is missing. -global: - user: "tidb" - ssh_port: 22 - deploy_dir: "/tidb-deploy" - data_dir: "/tidb-data" - -server_configs: - tidb: - binlog.enable: true - binlog.ignore-error: true - -pd_servers: - - host: 10.0.1.4 - - host: 10.0.1.5 - - host: 10.0.1.6 -tidb_servers: - - host: 10.0.1.1 - - host: 10.0.1.2 - - host: 10.0.1.3 -tikv_servers: - - host: 10.0.1.7 - - host: 10.0.1.8 - - host: 10.0.1.9 - -pump_servers: - - host: 10.0.1.1 - - host: 10.0.1.2 - - host: 10.0.1.3 -drainer_servers: - - host: 10.0.1.12 - config: - syncer.db-type: "tidb" - syncer.to.host: "10.0.1.12" - syncer.to.user: "root" - syncer.to.password: "" - syncer.to.port: 4000 - -monitoring_servers: - - host: 10.0.1.10 - -grafana_servers: - - host: 10.0.1.10 - -alertmanager_servers: - - host: 10.0.1.10 \ No newline at end of file diff --git a/config-templates/simple-tiflash.yaml b/config-templates/simple-tiflash.yaml deleted file mode 100644 index 1e2024440854..000000000000 --- a/config-templates/simple-tiflash.yaml +++ /dev/null @@ -1,40 +0,0 @@ -# # Global variables are applied to all deployments and used as the default value of -# # the deployments if a specific deployment value is missing. 
-global: - user: "tidb" - ssh_port: 22 - deploy_dir: "/tidb-deploy" - data_dir: "/tidb-data" - -server_configs: - pd: - replication.enable-placement-rules: true - -pd_servers: - - host: 10.0.1.4 - - host: 10.0.1.5 - - host: 10.0.1.6 - -tidb_servers: - - host: 10.0.1.7 - - host: 10.0.1.8 - - host: 10.0.1.9 - -tikv_servers: - - host: 10.0.1.1 - - host: 10.0.1.2 - - host: 10.0.1.3 - -tiflash_servers: - - host: 10.0.1.11 - data_dir: /tidb-data/tiflash-9000 - deploy_dir: /tidb-deploy/tiflash-9000 - -monitoring_servers: - - host: 10.0.1.10 - -grafana_servers: - - host: 10.0.1.10 - -alertmanager_servers: - - host: 10.0.1.10 \ No newline at end of file diff --git a/config-templates/simple-tispark.yaml b/config-templates/simple-tispark.yaml deleted file mode 100644 index 7566e8394f9a..000000000000 --- a/config-templates/simple-tispark.yaml +++ /dev/null @@ -1,45 +0,0 @@ -# # Global variables are applied to all deployments and used as the default value of -# # the deployments if a specific deployment value is missing. -global: - user: "tidb" - ssh_port: 22 - deploy_dir: "/tidb-deploy" - data_dir: "/tidb-data" - -pd_servers: - - host: 10.0.1.4 - - host: 10.0.1.5 - - host: 10.0.1.6 - -tidb_servers: - - host: 10.0.1.1 - - host: 10.0.1.2 - - host: 10.0.1.3 - -tikv_servers: - - host: 10.0.1.7 - - host: 10.0.1.8 - - host: 10.0.1.9 - - -# NOTE: TiSpark support is an experimental feature, it's not recommend to be used in -# production at present. -# To use TiSpark, you need to manually install Java Runtime Environment (JRE) 8 on the -# host, see the OpenJDK doc for a reference: https://openjdk.java.net/install/ -# NOTE: Only 1 master node is supported for now -tispark_masters: - - host: 10.0.1.21 - -# NOTE: multiple worker nodes on the same host is not supported by Spark -tispark_workers: - - host: 10.0.1.22 - - host: 10.0.1.23 - -monitoring_servers: - - host: 10.0.1.10 - -grafana_servers: - - host: 10.0.1.10 - -alertmanager_servers: - - host: 10.0.1.10 diff --git a/geo-distributed-deployment-topology.md b/geo-distributed-deployment-topology.md index 0f473ebddee9..89881352d434 100644 --- a/geo-distributed-deployment-topology.md +++ b/geo-distributed-deployment-topology.md @@ -19,7 +19,128 @@ aliases: ['/docs-cn/dev/geo-distributed-deployment-topology/'] ### 拓扑模版 -[跨机房配置模板](https://github.com/pingcap/docs-cn/blob/master/config-templates/geo-redundancy-deployment.yaml) +
+跨机房配置模板 + +```yaml +# Tip: PD priority needs to be manually set using the PD-ctl client tool. such as, member Leader_priority PD-name numbers. +# Global variables are applied to all deployments and used as the default value of +# the deployments if a specific deployment value is missing. +# +# Abbreviations used in this example: +# sh: Shanghai Zone +# bj: Beijing Zone +# sha: Shanghai Datacenter A +# bja: Beijing Datacenter A +# bjb: Beijing Datacenter B + +global: + user: "tidb" + ssh_port: 22 + deploy_dir: "/tidb-deploy" + data_dir: "/tidb-data" +monitored: + node_exporter_port: 9100 + blackbox_exporter_port: 9115 + deploy_dir: "/tidb-deploy/monitored-9100" +server_configs: + tidb: + log.level: debug + log.slow-query-file: tidb-slow.log + tikv: + server.grpc-compression-type: gzip + readpool.storage.use-unified-pool: true + readpool.storage.low-concurrency: 8 + pd: + replication.location-labels: ["zone","dc","rack","host"] + replication.max-replicas: 5 + label-property: # TiDB 5.2 及以上版本默认不支持 label-property 配置。若要设置副本策略,请使用 Placement Rules。 + reject-leader: + - key: "dc" + value: "sha" +pd_servers: + - host: 10.0.1.6 + - host: 10.0.1.7 + - host: 10.0.1.8 + - host: 10.0.1.9 + - host: 10.0.1.10 +tidb_servers: + - host: 10.0.1.1 + - host: 10.0.1.2 + - host: 10.0.1.3 + - host: 10.0.1.4 + - host: 10.0.1.5 +tikv_servers: + - host: 10.0.1.11 + ssh_port: 22 + port: 20160 + status_port: 20180 + deploy_dir: "/tidb-deploy/tikv-20160" + data_dir: "/tidb-data/tikv-20160" + config: + server.labels: + zone: bj + dc: bja + rack: rack1 + host: host1 + - host: 10.0.1.12 + ssh_port: 22 + port: 20161 + status_port: 20181 + deploy_dir: "/tidb-deploy/tikv-20161" + data_dir: "/tidb-data/tikv-20161" + config: + server.labels: + zone: bj + dc: bja + rack: rack1 + host: host2 + - host: 10.0.1.13 + ssh_port: 22 + port: 20160 + status_port: 20180 + deploy_dir: "/tidb-deploy/tikv-20160" + data_dir: "/tidb-data/tikv-20160" + config: + server.labels: + zone: bj + dc: bjb + rack: rack1 + host: host1 + - host: 10.0.1.14 + ssh_port: 22 + port: 20161 + status_port: 20181 + deploy_dir: "/tidb-deploy/tikv-20161" + data_dir: "/tidb-data/tikv-20161" + config: + server.labels: + zone: bj + dc: bjb + rack: rack1 + host: host2 + - host: 10.0.1.15 + ssh_port: 22 + port: 20160 + deploy_dir: "/tidb-deploy/tikv-20160" + data_dir: "/tidb-data/tikv-20160" + config: + server.labels: + zone: sh + dc: sha + rack: rack1 + host: host1 + readpool.storage.use-unified-pool: true + readpool.storage.low-concurrency: 10 + raftstore.raft-min-election-timeout-ticks: 1000 + raftstore.raft-max-election-timeout-ticks: 1020 +monitoring_servers: + - host: 10.0.1.16 +grafana_servers: + - host: 10.0.1.16 +``` + +
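The Tip comment at the top of this template notes that PD leader priority still has to be set afterwards with pd-ctl (`member leader_priority <PD-name> <number>`). Giving each PD instance an explicit `name` in the topology file keeps those member names predictable. The fragment below is an illustrative sketch only; the hosts come from the template above, but the names are examples:

```yaml
# Illustrative sketch, not a drop-in replacement for the pd_servers list above:
# an explicit `name` makes the member name used by
# `pd-ctl member leader_priority <PD-name> <number>` predictable after deployment.
pd_servers:
  - host: 10.0.1.6
    name: "pd-1"
  - host: 10.0.1.7
    name: "pd-2"
  - host: 10.0.1.10
    name: "pd-5"
```

After deployment, you would lower the priority of whichever PD members sit in the Shanghai datacenter so that the PD leader is elected in the Beijing datacenters.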
以上 TiDB 集群拓扑文件中,详细的配置项说明见[通过 TiUP 部署 TiDB 集群的拓扑文件配置](/tiup/tiup-cluster-topology-reference.md)。 diff --git a/hybrid-deployment-topology.md b/hybrid-deployment-topology.md index 303706881a6d..d64104aaf4bf 100644 --- a/hybrid-deployment-topology.md +++ b/hybrid-deployment-topology.md @@ -19,9 +19,266 @@ aliases: ['/docs-cn/dev/hybrid-deployment-topology/'] ### 拓扑模版 -[简单混部配置模板](https://github.com/pingcap/docs-cn/blob/master/config-templates/simple-multi-instance.yaml) - -[详细混部配置模板](https://github.com/pingcap/docs-cn/blob/master/config-templates/complex-multi-instance.yaml) +
+简单混部配置模板 + +```yaml +# # Global variables are applied to all deployments and used as the default value of +# # the deployments if a specific deployment value is missing. +global: + user: "tidb" + ssh_port: 22 + deploy_dir: "/tidb-deploy" + data_dir: "/tidb-data" + +server_configs: + tikv: + readpool.unified.max-thread-count: + readpool.storage.use-unified-pool: false + readpool.coprocessor.use-unified-pool: true + storage.block-cache.capacity: "" + raftstore.capacity: "" + pd: + replication.location-labels: ["host"] + +pd_servers: + - host: 10.0.1.4 + - host: 10.0.1.5 + - host: 10.0.1.6 + +tidb_servers: + - host: 10.0.1.1 + port: 4000 + status_port: 10080 + numa_node: "0" + - host: 10.0.1.1 + port: 4001 + status_port: 10081 + numa_node: "1" + - host: 10.0.1.2 + port: 4000 + status_port: 10080 + numa_node: "0" + - host: 10.0.1.2 + port: 4001 + status_port: 10081 + numa_node: "1" + - host: 10.0.1.3 + port: 4000 + status_port: 10080 + numa_node: "0" + - host: 10.0.1.3 + port: 4001 + status_port: 10081 + numa_node: "1" + +tikv_servers: + - host: 10.0.1.7 + port: 20160 + status_port: 20180 + numa_node: "0" + config: + server.labels: { host: "tikv1" } + - host: 10.0.1.7 + port: 20161 + status_port: 20181 + numa_node: "1" + config: + server.labels: { host: "tikv1" } + - host: 10.0.1.8 + port: 20160 + status_port: 20180 + numa_node: "0" + config: + server.labels: { host: "tikv2" } + - host: 10.0.1.8 + port: 20161 + status_port: 20181 + numa_node: "1" + config: + server.labels: { host: "tikv2" } + - host: 10.0.1.9 + port: 20160 + status_port: 20180 + numa_node: "0" + config: + server.labels: { host: "tikv3" } + - host: 10.0.1.9 + port: 20161 + status_port: 20181 + numa_node: "1" + config: + server.labels: { host: "tikv3" } + +monitoring_servers: + - host: 10.0.1.10 + +grafana_servers: + - host: 10.0.1.10 + +alertmanager_servers: + - host: 10.0.1.10 +``` + +
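The pattern above extends naturally if a machine has to host more instances: every co-located instance needs its own `port` and `status_port` (and, when NUMA binding is used, a `numa_node` that matches the machine's actual layout). A purely illustrative sketch of a hypothetical third TiDB instance on 10.0.1.1:

```yaml
# Illustrative sketch only: a hypothetical extra TiDB instance on a host that already
# runs two. Co-located instances must not share ports; the numa_node value is an
# example and should match the real NUMA topology of the machine.
tidb_servers:
  - host: 10.0.1.1
    port: 4002
    status_port: 10082
    numa_node: "0"
```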
+ +
+详细混部配置模板 + +```yaml +# # Global variables are applied to all deployments and used as the default value of +# # the deployments if a specific deployment value is missing. +global: + user: "tidb" + ssh_port: 22 + deploy_dir: "/tidb-deploy" + data_dir: "/tidb-data" + +monitored: + node_exporter_port: 9100 + blackbox_exporter_port: 9115 + deploy_dir: "/tidb-deploy/monitored-9100" + data_dir: "/tidb-data/monitored-9100" + log_dir: "/tidb-deploy/monitored-9100/log" + +server_configs: + tidb: + log.slow-threshold: 300 + tikv: + readpool.unified.max-thread-count: + readpool.storage.use-unified-pool: false + readpool.coprocessor.use-unified-pool: true + storage.block-cache.capacity: "" + raftstore.capacity: "" + pd: + replication.location-labels: ["host"] + schedule.leader-schedule-limit: 4 + schedule.region-schedule-limit: 2048 + schedule.replica-schedule-limit: 64 + +pd_servers: + - host: 10.0.1.4 + - host: 10.0.1.5 + - host: 10.0.1.6 + +tidb_servers: + - host: 10.0.1.1 + port: 4000 + status_port: 10080 + deploy_dir: "/tidb-deploy/tidb-4000" + log_dir: "/tidb-deploy/tidb-4000/log" + numa_node: "0" + - host: 10.0.1.1 + port: 4001 + status_port: 10081 + deploy_dir: "/tidb-deploy/tidb-4001" + log_dir: "/tidb-deploy/tidb-4001/log" + numa_node: "1" + - host: 10.0.1.2 + port: 4000 + status_port: 10080 + deploy_dir: "/tidb-deploy/tidb-4000" + log_dir: "/tidb-deploy/tidb-4000/log" + numa_node: "0" + - host: 10.0.1.2 + port: 4001 + status_port: 10081 + deploy_dir: "/tidb-deploy/tidb-4001" + log_dir: "/tidb-deploy/tidb-4001/log" + numa_node: "1" + - host: 10.0.1.3 + port: 4000 + status_port: 10080 + deploy_dir: "/tidb-deploy/tidb-4000" + log_dir: "/tidb-deploy/tidb-4000/log" + numa_node: "0" + - host: 10.0.1.3 + port: 4001 + status_port: 10081 + deploy_dir: "/tidb-deploy/tidb-4001" + log_dir: "/tidb-deploy/tidb-4001/log" + numa_node: "1" + +tikv_servers: + - host: 10.0.1.7 + port: 20160 + status_port: 20180 + deploy_dir: "/tidb-deploy/tikv-20160" + data_dir: "/tidb-data/tikv-20160" + log_dir: "/tidb-deploy/tikv-20160/log" + numa_node: "0" + config: + server.labels: { host: "tikv1" } + - host: 10.0.1.7 + port: 20161 + status_port: 20181 + deploy_dir: "/tidb-deploy/tikv-20161" + data_dir: "/tidb-data/tikv-20161" + log_dir: "/tidb-deploy/tikv-20161/log" + numa_node: "1" + config: + server.labels: { host: "tikv1" } + - host: 10.0.1.8 + port: 20160 + status_port: 20180 + deploy_dir: "/tidb-deploy/tikv-20160" + data_dir: "/tidb-data/tikv-20160" + log_dir: "/tidb-deploy/tikv-20160/log" + numa_node: "0" + config: + server.labels: { host: "tikv2" } + - host: 10.0.1.8 + port: 20161 + status_port: 20181 + deploy_dir: "/tidb-deploy/tikv-20161" + data_dir: "/tidb-data/tikv-20161" + log_dir: "/tidb-deploy/tikv-20161/log" + numa_node: "1" + config: + server.labels: { host: "tikv2" } + - host: 10.0.1.9 + port: 20160 + status_port: 20180 + deploy_dir: "/tidb-deploy/tikv-20160" + data_dir: "/tidb-data/tikv-20160" + log_dir: "/tidb-deploy/tikv-20160/log" + numa_node: "0" + config: + server.labels: { host: "tikv3" } + - host: 10.0.1.9 + port: 20161 + status_port: 20181 + deploy_dir: "/tidb-deploy/tikv-20161" + data_dir: "/tidb-data/tikv-20161" + log_dir: "/tidb-deploy/tikv-20161/log" + numa_node: "1" + config: + server.labels: { host: "tikv3" } + +monitoring_servers: + - host: 10.0.1.10 + # ssh_port: 22 + # port: 9090 + # deploy_dir: "/tidb-deploy/prometheus-8249" + # data_dir: "/tidb-data/prometheus-8249" + # log_dir: "/tidb-deploy/prometheus-8249/log" + +grafana_servers: + - host: 10.0.1.10 + # port: 3000 + # deploy_dir: 
/tidb-deploy/grafana-3000 + +alertmanager_servers: + - host: 10.0.1.10 + # ssh_port: 22 + # web_port: 9093 + # cluster_port: 9094 + # deploy_dir: "/tidb-deploy/alertmanager-9093" + # data_dir: "/tidb-data/alertmanager-9093" + # log_dir: "/tidb-deploy/alertmanager-9093/log" +``` + +
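Both templates above intentionally leave `readpool.unified.max-thread-count`, `storage.block-cache.capacity`, and `raftstore.capacity` blank, because suitable values depend on how many TiKV instances share a machine and on that machine's resources. The following sketch is illustrative only, assuming a hypothetical host with 16 cores, 64 GiB of memory, and a 1 TiB data disk shared by two TiKV instances; the formulas in the comments are approximate guidance, not exact requirements:

```yaml
# Illustrative values for a hypothetical 16-core / 64 GiB host running two TiKV
# instances on one 1 TiB data disk. Adjust for your own hardware.
server_configs:
  tikv:
    readpool.unified.max-thread-count: 6     # roughly: cores * 0.8 / TiKV instances per host
    storage.block-cache.capacity: "16GB"     # roughly: total memory * 0.5 / TiKV instances per host
    raftstore.capacity: "500GB"              # roughly: data disk capacity / TiKV instances per host
```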
以上 TiDB 集群拓扑文件中,详细的配置项说明见[通过 TiUP 部署 TiDB 集群的拓扑文件配置](/tiup/tiup-cluster-topology-reference.md)。 diff --git a/minimal-deployment-topology.md b/minimal-deployment-topology.md index e45ce8043b3c..0aa6bcdc5bfc 100644 --- a/minimal-deployment-topology.md +++ b/minimal-deployment-topology.md @@ -19,9 +19,163 @@ aliases: ['/docs-cn/dev/minimal-deployment-topology/'] ### 拓扑模版 -[简单最小配置模板](https://github.com/pingcap/docs-cn/blob/master/config-templates/simple-mini.yaml) +
+简单最小配置模板 -[详细最小配置模板](https://github.com/pingcap/docs-cn/blob/master/config-templates/complex-mini.yaml) +```yaml +# # Global variables are applied to all deployments and used as the default value of +# # the deployments if a specific deployment value is missing. +global: + user: "tidb" + ssh_port: 22 + deploy_dir: "/tidb-deploy" + data_dir: "/tidb-data" + +pd_servers: + - host: 10.0.1.4 + - host: 10.0.1.5 + - host: 10.0.1.6 + +tidb_servers: + - host: 10.0.1.1 + - host: 10.0.1.2 + +tikv_servers: + - host: 10.0.1.7 + - host: 10.0.1.8 + - host: 10.0.1.9 + +monitoring_servers: + - host: 10.0.1.10 + +grafana_servers: + - host: 10.0.1.10 + +alertmanager_servers: + - host: 10.0.1.10 +``` + +
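Besides the four fields shown above, the `global` section can carry other cluster-wide defaults, for example the target CPU architecture of the hosts. A brief illustrative sketch (the value is an example):

```yaml
# Illustrative sketch: `arch` in `global` sets the default CPU architecture for all
# hosts in the topology (for example "amd64" or "arm64").
global:
  user: "tidb"
  ssh_port: 22
  deploy_dir: "/tidb-deploy"
  data_dir: "/tidb-data"
  arch: "amd64"
```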
+ +
+详细最小配置模板 + +```yaml +# # Global variables are applied to all deployments and used as the default value of +# # the deployments if a specific deployment value is missing. +global: + user: "tidb" + ssh_port: 22 + deploy_dir: "/tidb-deploy" + data_dir: "/tidb-data" + +# # Monitored variables are applied to all the machines. +monitored: + node_exporter_port: 9100 + blackbox_exporter_port: 9115 + # deploy_dir: "/tidb-deploy/monitored-9100" + # data_dir: "/tidb-data/monitored-9100" + # log_dir: "/tidb-deploy/monitored-9100/log" + +# # Server configs are used to specify the runtime configuration of TiDB components. +# # All configuration items can be found in TiDB docs: +# # - TiDB: https://docs.pingcap.com/zh/tidb/stable/tidb-configuration-file +# # - TiKV: https://docs.pingcap.com/zh/tidb/stable/tikv-configuration-file +# # - PD: https://docs.pingcap.com/zh/tidb/stable/pd-configuration-file +# # All configuration items use points to represent the hierarchy, e.g: +# # readpool.storage.use-unified-pool +# # +# # You can overwrite this configuration via the instance-level `config` field. + +server_configs: + tidb: + log.slow-threshold: 300 + binlog.enable: false + binlog.ignore-error: false + tikv: + # server.grpc-concurrency: 4 + # raftstore.apply-pool-size: 2 + # raftstore.store-pool-size: 2 + # rocksdb.max-sub-compactions: 1 + # storage.block-cache.capacity: "16GB" + # readpool.unified.max-thread-count: 12 + readpool.storage.use-unified-pool: false + readpool.coprocessor.use-unified-pool: true + pd: + schedule.leader-schedule-limit: 4 + schedule.region-schedule-limit: 2048 + schedule.replica-schedule-limit: 64 + +pd_servers: + - host: 10.0.1.4 + # ssh_port: 22 + # name: "pd-1" + # client_port: 2379 + # peer_port: 2380 + # deploy_dir: "/tidb-deploy/pd-2379" + # data_dir: "/tidb-data/pd-2379" + # log_dir: "/tidb-deploy/pd-2379/log" + # numa_node: "0,1" + # # The following configs are used to overwrite the `server_configs.pd` values. + # config: + # schedule.max-merge-region-size: 20 + # schedule.max-merge-region-keys: 200000 + - host: 10.0.1.5 + - host: 10.0.1.6 + +tidb_servers: + - host: 10.0.1.1 + # ssh_port: 22 + # port: 4000 + # status_port: 10080 + # deploy_dir: "/tidb-deploy/tidb-4000" + # log_dir: "/tidb-deploy/tidb-4000/log" + # numa_node: "0,1" + # # The following configs are used to overwrite the `server_configs.tidb` values. + # config: + # log.slow-query-file: tidb-slow-overwrited.log + - host: 10.0.1.2 + +tikv_servers: + - host: 10.0.1.7 + # ssh_port: 22 + # port: 20160 + # status_port: 20180 + # deploy_dir: "/tidb-deploy/tikv-20160" + # data_dir: "/tidb-data/tikv-20160" + # log_dir: "/tidb-deploy/tikv-20160/log" + # numa_node: "0,1" + # # The following configs are used to overwrite the `server_configs.tikv` values. + # config: + # server.grpc-concurrency: 4 + # server.labels: { zone: "zone1", dc: "dc1", host: "host1" } + - host: 10.0.1.8 + - host: 10.0.1.9 + +monitoring_servers: + - host: 10.0.1.10 + # ssh_port: 22 + # port: 9090 + # deploy_dir: "/tidb-deploy/prometheus-8249" + # data_dir: "/tidb-data/prometheus-8249" + # log_dir: "/tidb-deploy/prometheus-8249/log" + +grafana_servers: + - host: 10.0.1.10 + # port: 3000 + # deploy_dir: /tidb-deploy/grafana-3000 + +alertmanager_servers: + - host: 10.0.1.10 + # ssh_port: 22 + # web_port: 9093 + # cluster_port: 9094 + # deploy_dir: "/tidb-deploy/alertmanager-9093" + # data_dir: "/tidb-data/alertmanager-9093" + # log_dir: "/tidb-deploy/alertmanager-9093/log" +``` + +
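As the comments in the template above point out repeatedly, an instance-level `config` field overrides the corresponding `server_configs` values for that single instance. A minimal illustrative example of such an override, uncommented, with values taken from the hints in the template:

```yaml
# Illustrative sketch of an instance-level override: these settings apply only to the
# TiKV instance on 10.0.1.7 and take precedence over `server_configs.tikv` for it.
tikv_servers:
  - host: 10.0.1.7
    config:
      server.grpc-concurrency: 4
      server.labels: { zone: "zone1", dc: "dc1", host: "host1" }
```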
以上 TiDB 集群拓扑文件中,详细的配置项说明见[通过 TiUP 部署 TiDB 集群的拓扑文件配置](/tiup/tiup-cluster-topology-reference.md)。 diff --git a/production-deployment-using-tiup.md b/production-deployment-using-tiup.md index b19fa30d4a81..3592009adce4 100644 --- a/production-deployment-using-tiup.md +++ b/production-deployment-using-tiup.md @@ -275,13 +275,13 @@ alertmanager_servers: | 场景 | 配置任务 | 配置文件模板 | 拓扑说明 | | :-- | :-- | :-- | :-- | -| OLTP 业务 | [部署最小拓扑架构](/minimal-deployment-topology.md) | [简单最小配置模板](https://github.com/pingcap/docs-cn/blob/master/config-templates/simple-mini.yaml)
[详细最小配置模板](https://github.com/pingcap/docs-cn/blob/master/config-templates/complex-mini.yaml) | 最小集群拓扑,包括 tidb-server、tikv-server、pd-server。 | -| HTAP 业务 | [部署 TiFlash 拓扑架构](/tiflash-deployment-topology.md) | [简单 TiFlash 配置模版](https://github.com/pingcap/docs-cn/blob/master/config-templates/simple-tiflash.yaml)
[详细 TiFlash 配置模版](https://github.com/pingcap/docs-cn/blob/master/config-templates/complex-tiflash.yaml) | 在最小拓扑的基础上部署 TiFlash。TiFlash 是列式存储引擎,已经逐步成为集群拓扑的标配。| -| 使用 [TiCDC](/ticdc/ticdc-overview.md) 进行增量同步 | [部署 TiCDC 拓扑架构](/ticdc-deployment-topology.md) | [简单 TiCDC 配置模板](https://github.com/pingcap/docs-cn/blob/master/config-templates/simple-cdc.yaml)
[详细 TiCDC 配置模板](https://github.com/pingcap/docs-cn/blob/master/config-templates/complex-cdc.yaml) | 在最小拓扑的基础上部署 TiCDC。TiCDC 支持多种下游:TiDB、MySQL、Kafka、MQ、Confluent 和存储服务。 | -| 使用 [TiDB Binlog](/tidb-binlog/tidb-binlog-overview.md) 进行增量同步 | [部署 TiDB Binlog 拓扑架构](/tidb-binlog-deployment-topology.md) | [简单 TiDB Binlog 配置模板(下游为 MySQL)](https://github.com/pingcap/docs-cn/blob/master/config-templates/simple-tidb-binlog.yaml)
[简单 TiDB Binlog 配置模板(下游为 file)](https://github.com/pingcap/docs-cn/blob/master/config-templates/simple-file-binlog.yaml)
[详细 TiDB Binlog 配置模板](https://github.com/pingcap/docs-cn/blob/master/config-templates/complex-tidb-binlog.yaml) | 在最小拓扑的基础上部署 TiDB Binlog。 | -| 使用 Spark 的 OLAP 业务 | [部署 TiSpark 拓扑架构](/tispark-deployment-topology.md) | [简单 TiSpark 配置模板](https://github.com/pingcap/docs-cn/blob/master/config-templates/simple-tispark.yaml)
[详细 TiSpark 配置模板](https://github.com/pingcap/docs-cn/blob/master/config-templates/complex-tispark.yaml) | 在最小拓扑的基础上部署 TiSpark 组件。TiSpark 是 PingCAP 为解决用户复杂 OLAP 需求而推出的产品。TiUP cluster 组件对 TiSpark 的支持目前为实验特性。 | -| 单台机器,多个实例 | [混合部署拓扑架构](/hybrid-deployment-topology.md) | [简单混部配置模板](https://github.com/pingcap/docs-cn/blob/master/config-templates/simple-multi-instance.yaml)
[详细混部配置模板](https://github.com/pingcap/docs-cn/blob/master/config-templates/complex-multi-instance.yaml) | 也适用于单机多实例需要额外增加目录、端口、资源配比、label 等配置的场景。 | -| 跨机房部署 TiDB 集群 | [跨机房部署拓扑架构](/geo-distributed-deployment-topology.md) | [跨机房配置模板](https://github.com/pingcap/docs-cn/blob/master/config-templates/geo-redundancy-deployment.yaml) | 以典型的两地三中心架构为例,介绍跨机房部署架构,以及需要注意的关键设置。 | +| OLTP 业务 | [部署最小拓扑架构](/minimal-deployment-topology.md) | [简单最小配置模板](/minimal-deployment-topology.md#拓扑模版)
[详细最小配置模板](/minimal-deployment-topology.md#拓扑模版) | 最小集群拓扑,包括 tidb-server、tikv-server、pd-server。 | +| HTAP 业务 | [部署 TiFlash 拓扑架构](/tiflash-deployment-topology.md) | [简单 TiFlash 配置模版](/tiflash-deployment-topology.md#拓扑模版)
[详细 TiFlash 配置模版](/tiflash-deployment-topology.md#拓扑模版) | 在最小拓扑的基础上部署 TiFlash。TiFlash 是列式存储引擎,已经逐步成为集群拓扑的标配。| +| 使用 [TiCDC](/ticdc/ticdc-overview.md) 进行增量同步 | [部署 TiCDC 拓扑架构](/ticdc-deployment-topology.md) | [简单 TiCDC 配置模板](/ticdc-deployment-topology.md#拓扑模版)
[详细 TiCDC 配置模板](/ticdc-deployment-topology.md#拓扑模版) | 在最小拓扑的基础上部署 TiCDC。TiCDC 支持多种下游:TiDB、MySQL、Kafka、MQ、Confluent 和存储服务。 | +| 使用 [TiDB Binlog](/tidb-binlog/tidb-binlog-overview.md) 进行增量同步 | [部署 TiDB Binlog 拓扑架构](/tidb-binlog-deployment-topology.md) | [简单 TiDB Binlog 配置模板(下游为 MySQL)](/tidb-binlog-deployment-topology.md#拓扑模版)
[简单 TiDB Binlog 配置模板(下游为 file)](/tidb-binlog-deployment-topology.md#拓扑模版)
[详细 TiDB Binlog 配置模板](/tidb-binlog-deployment-topology.md#拓扑模版) | 在最小拓扑的基础上部署 TiDB Binlog。 | +| 使用 Spark 的 OLAP 业务 | [部署 TiSpark 拓扑架构](/tispark-deployment-topology.md) | [简单 TiSpark 配置模板](/tispark-deployment-topology.md#拓扑模版)
[详细 TiSpark 配置模板](/tispark-deployment-topology.md#拓扑模版) | 在最小拓扑的基础上部署 TiSpark 组件。TiSpark 是 PingCAP 为解决用户复杂 OLAP 需求而推出的产品。TiUP cluster 组件对 TiSpark 的支持目前为实验特性。 | +| 单台机器,多个实例 | [混合部署拓扑架构](/hybrid-deployment-topology.md) | [简单混部配置模板](/hybrid-deployment-topology.md#拓扑模版)
[详细混部配置模板](/hybrid-deployment-topology.md#拓扑模版) | 也适用于单机多实例需要额外增加目录、端口、资源配比、label 等配置的场景。 | +| 跨机房部署 TiDB 集群 | [跨机房部署拓扑架构](/geo-distributed-deployment-topology.md) | [跨机房配置模板](/geo-distributed-deployment-topology.md#拓扑模版) | 以典型的两地三中心架构为例,介绍跨机房部署架构,以及需要注意的关键设置。 | > **注意:** > diff --git a/ticdc-deployment-topology.md b/ticdc-deployment-topology.md index a9db1075f105..2ea0f43fe466 100644 --- a/ticdc-deployment-topology.md +++ b/ticdc-deployment-topology.md @@ -24,9 +24,196 @@ aliases: ['/docs-cn/dev/ticdc-deployment-topology/','/docs-cn/dev/reference/tool ### 拓扑模版 -[简单 TiCDC 配置模板](https://github.com/pingcap/docs-cn/blob/master/config-templates/simple-cdc.yaml) +
+简单 TiCDC 配置模板 -[详细 TiCDC 配置模板](https://github.com/pingcap/docs-cn/blob/master/config-templates/complex-cdc.yaml) +```yaml +# # Global variables are applied to all deployments and used as the default value of +# # the deployments if a specific deployment value is missing. +global: + user: "tidb" + ssh_port: 22 + deploy_dir: "/tidb-deploy" + data_dir: "/tidb-data" + +pd_servers: + - host: 10.0.1.4 + - host: 10.0.1.5 + - host: 10.0.1.6 + +tidb_servers: + - host: 10.0.1.1 + - host: 10.0.1.2 + - host: 10.0.1.3 + +tikv_servers: + - host: 10.0.1.7 + - host: 10.0.1.8 + - host: 10.0.1.9 + +cdc_servers: + - host: 10.0.1.7 + - host: 10.0.1.8 + - host: 10.0.1.9 + +monitoring_servers: + - host: 10.0.1.10 + +grafana_servers: + - host: 10.0.1.10 + +alertmanager_servers: + - host: 10.0.1.10 +``` + +
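+
+In the simple template above, TiCDC reuses the TiKV hosts with all defaults. When a capture node needs its own sort/spill directory or a non-default garbage-collection TTL, both can be set per instance, as in this hedged sketch (the host, path, and TTL are assumptions; the detailed template below shows the remaining fields):
+
+```yaml
+cdc_servers:
+  - host: 10.0.1.7
+    port: 8300
+    data_dir: "/cdc-data/cdc-8300"
+    gc-ttl: 86400
+```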
+ +
+详细 TiCDC 配置模板 + +```yaml +# # Global variables are applied to all deployments and used as the default value of +# # the deployments if a specific deployment value is missing. +global: + user: "tidb" + ssh_port: 22 + deploy_dir: "/tidb-deploy" + data_dir: "/tidb-data" + +# # Monitored variables are applied to all the machines. +monitored: + node_exporter_port: 9100 + blackbox_exporter_port: 9115 + # deploy_dir: "/tidb-deploy/monitored-9100" + # data_dir: "/tidb-data/monitored-9100" + # log_dir: "/tidb-deploy/monitored-9100/log" + +# # Server configs are used to specify the runtime configuration of TiDB components. +# # All configuration items can be found in TiDB docs: +# # - TiDB: https://docs.pingcap.com/zh/tidb/stable/tidb-configuration-file +# # - TiKV: https://docs.pingcap.com/zh/tidb/stable/tikv-configuration-file +# # - PD: https://docs.pingcap.com/zh/tidb/stable/pd-configuration-file +# # All configuration items use points to represent the hierarchy, e.g: +# # readpool.storage.use-unified-pool +# # +# # You can overwrite this configuration via the instance-level `config` field. + +server_configs: + tidb: + log.slow-threshold: 300 + tikv: + # server.grpc-concurrency: 4 + # raftstore.apply-pool-size: 2 + # raftstore.store-pool-size: 2 + # rocksdb.max-sub-compactions: 1 + # storage.block-cache.capacity: "16GB" + # readpool.unified.max-thread-count: 12 + readpool.storage.use-unified-pool: false + readpool.coprocessor.use-unified-pool: true + pd: + schedule.leader-schedule-limit: 4 + schedule.region-schedule-limit: 2048 + schedule.replica-schedule-limit: 64 + cdc: + # capture-session-ttl: 10 + # sorter.sort-dir: "/tmp/cdc_sort" + # gc-ttl: 86400 + +pd_servers: + - host: 10.0.1.4 + # ssh_port: 22 + # name: "pd-1" + # client_port: 2379 + # peer_port: 2380 + # deploy_dir: "/tidb-deploy/pd-2379" + # data_dir: "/tidb-data/pd-2379" + # log_dir: "/tidb-deploy/pd-2379/log" + # numa_node: "0,1" + # # The following configs are used to overwrite the `server_configs.pd` values. + # config: + # schedule.max-merge-region-size: 20 + # schedule.max-merge-region-keys: 200000 + - host: 10.0.1.5 + - host: 10.0.1.6 + +tidb_servers: + - host: 10.0.1.1 + # ssh_port: 22 + # port: 4000 + # status_port: 10080 + # deploy_dir: "/tidb-deploy/tidb-4000" + # log_dir: "/tidb-deploy/tidb-4000/log" + # numa_node: "0,1" + # # The following configs are used to overwrite the `server_configs.tidb` values. + # config: + # log.slow-query-file: tidb-slow-overwrited.log + - host: 10.0.1.2 + - host: 10.0.1.3 + +tikv_servers: + - host: 10.0.1.7 + # ssh_port: 22 + # port: 20160 + # status_port: 20180 + # deploy_dir: "/tidb-deploy/tikv-20160" + # data_dir: "/tidb-data/tikv-20160" + # log_dir: "/tidb-deploy/tikv-20160/log" + # numa_node: "0,1" + # # The following configs are used to overwrite the `server_configs.tikv` values. 
+ # config: + # server.grpc-concurrency: 4 + # server.labels: { zone: "zone1", dc: "dc1", host: "host1" } + + - host: 10.0.1.8 + - host: 10.0.1.9 + +cdc_servers: + - host: 10.0.1.1 + port: 8300 + deploy_dir: "/tidb-deploy/cdc-8300" + data_dir: "/tidb-data/cdc-8300" + log_dir: "/tidb-deploy/cdc-8300/log" + # gc-ttl: 86400 + # ticdc_cluster_id: "cluster1" + - host: 10.0.1.2 + port: 8300 + deploy_dir: "/tidb-deploy/cdc-8300" + data_dir: "/tidb-data/cdc-8300" + log_dir: "/tidb-deploy/cdc-8300/log" + # gc-ttl: 86400 + # ticdc_cluster_id: "cluster1" + - host: 10.0.1.3 + port: 8300 + deploy_dir: "/tidb-deploy/cdc-8300" + data_dir: "/tidb-data/cdc-8300" + log_dir: "/tidb-deploy/cdc-8300/log" + # gc-ttl: 86400 + # ticdc_cluster_id: "cluster2" + +monitoring_servers: + - host: 10.0.1.10 + # ssh_port: 22 + # port: 9090 + # deploy_dir: "/tidb-deploy/prometheus-8249" + # data_dir: "/tidb-data/prometheus-8249" + # log_dir: "/tidb-deploy/prometheus-8249/log" + +grafana_servers: + - host: 10.0.1.10 + # port: 3000 + # deploy_dir: /tidb-deploy/grafana-3000 + +alertmanager_servers: + - host: 10.0.1.10 + # ssh_port: 22 + # web_port: 9093 + # cluster_port: 9094 + # deploy_dir: "/tidb-deploy/alertmanager-9093" + # data_dir: "/tidb-data/alertmanager-9093" + # log_dir: "/tidb-deploy/alertmanager-9093/log" +``` + +
以上 TiDB 集群拓扑文件中,详细的配置项说明见[通过 TiUP 部署 TiDB 集群的拓扑文件配置](/tiup/tiup-cluster-topology-reference.md#cdc_servers)。 diff --git a/tidb-binlog-deployment-topology.md b/tidb-binlog-deployment-topology.md index 9eacb98bb2f0..5cc377acde80 100644 --- a/tidb-binlog-deployment-topology.md +++ b/tidb-binlog-deployment-topology.md @@ -20,11 +20,298 @@ aliases: ['/docs-cn/dev/tidb-binlog-deployment-topology/'] ### 拓扑模版 -[简单 TiDB Binlog 配置模板(下游为 MySQL)](https://github.com/pingcap/docs-cn/blob/master/config-templates/simple-tidb-binlog.yaml) +
+简单 TiDB Binlog 配置模板(下游为 MySQL) -[简单 TiDB Binlog 配置模板(下游为 file)](https://github.com/pingcap/docs-cn/blob/master/config-templates/simple-file-binlog.yaml) +```yaml +# # Global variables are applied to all deployments and used as the default value of +# # the deployments if a specific deployment value is missing. +global: + user: "tidb" + ssh_port: 22 + deploy_dir: "/tidb-deploy" + data_dir: "/tidb-data" -[详细 TiDB Binlog 配置模板](https://github.com/pingcap/docs-cn/blob/master/config-templates/complex-tidb-binlog.yaml) +server_configs: + tidb: + binlog.enable: true + binlog.ignore-error: true + +pd_servers: + - host: 10.0.1.4 + - host: 10.0.1.5 + - host: 10.0.1.6 +tidb_servers: + - host: 10.0.1.1 + - host: 10.0.1.2 + - host: 10.0.1.3 +tikv_servers: + - host: 10.0.1.7 + - host: 10.0.1.8 + - host: 10.0.1.9 + +pump_servers: + - host: 10.0.1.1 + - host: 10.0.1.2 + - host: 10.0.1.3 +drainer_servers: + - host: 10.0.1.12 + config: + syncer.db-type: "tidb" + syncer.to.host: "10.0.1.12" + syncer.to.user: "root" + syncer.to.password: "" + syncer.to.port: 4000 + +monitoring_servers: + - host: 10.0.1.10 + +grafana_servers: + - host: 10.0.1.10 + +alertmanager_servers: + - host: 10.0.1.10 +``` + +
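+
+The drainer entry above only describes the downstream connection. Two fields that are often added per drainer instance are a dedicated data directory and the initial checkpoint; both also appear in the detailed template further below, and the values here are assumptions for illustration:
+
+```yaml
+drainer_servers:
+  - host: 10.0.1.12
+    data_dir: "/tidb-data/drainer-8249"
+    # With commit_ts set to -1 (the default), the drainer starts from the latest TSO
+    # obtained from PD when no checkpoint exists yet.
+    commit_ts: -1
+    config:
+      syncer.db-type: "tidb"
+      syncer.to.host: "10.0.1.12"
+      syncer.to.user: "root"
+      syncer.to.password: ""
+      syncer.to.port: 4000
+```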
+ +
+简单 TiDB Binlog 配置模板(下游为 file) + +```yaml +# # Global variables are applied to all deployments and used as the default value of +# # the deployments if a specific deployment value is missing. +global: + user: "tidb" + ssh_port: 22 + deploy_dir: "/tidb-deploy" + data_dir: "/tidb-data" + +server_configs: + tidb: + binlog.enable: true + binlog.ignore-error: true + +pd_servers: + - host: 10.0.1.4 + - host: 10.0.1.5 + - host: 10.0.1.6 +tidb_servers: + - host: 10.0.1.1 + - host: 10.0.1.2 + - host: 10.0.1.3 +tikv_servers: + - host: 10.0.1.7 + - host: 10.0.1.8 + - host: 10.0.1.9 + +pump_servers: + - host: 10.0.1.1 + - host: 10.0.1.2 + - host: 10.0.1.3 +drainer_servers: + - host: 10.0.1.12 + # drainer meta data directory path + data_dir: "/path/to/save/data" + config: + syncer.db-type: "file" + # directory to save binlog file, default same as data-dir(save checkpoint file) if this is not configured. + # syncer.to.dir: "/path/to/save/binlog" + +monitoring_servers: + - host: 10.0.1.10 + +grafana_servers: + - host: 10.0.1.10 + +alertmanager_servers: + - host: 10.0.1.10 +``` + +
+ +
+详细 TiDB Binlog 配置模板 + +```yaml +# # Global variables are applied to all deployments and used as the default value of +# # the deployments if a specific deployment value is missing. +global: + user: "tidb" + ssh_port: 22 + deploy_dir: "/tidb-deploy" + data_dir: "/tidb-data" + +# # Monitored variables are applied to all the machines. +monitored: + node_exporter_port: 9100 + blackbox_exporter_port: 9115 + # deploy_dir: "/tidb-deploy/monitored-9100" + # data_dir: "/tidb-data/monitored-9100" + # log_dir: "/tidb-deploy/monitored-9100/log" + +# # Server configs are used to specify the runtime configuration of TiDB components. +# # All configuration items can be found in TiDB docs: +# # - TiDB: https://docs.pingcap.com/zh/tidb/stable/tidb-configuration-file +# # - TiKV: https://docs.pingcap.com/zh/tidb/stable/tikv-configuration-file +# # - PD: https://docs.pingcap.com/zh/tidb/stable/pd-configuration-file +# # All configuration items use points to represent the hierarchy, e.g: +# # readpool.storage.use-unified-pool +# # +# # You can overwrite this configuration via the instance-level `config` field. + +server_configs: + tidb: + log.slow-threshold: 300 + binlog.enable: true + binlog.ignore-error: true + tikv: + # server.grpc-concurrency: 4 + # raftstore.apply-pool-size: 2 + # raftstore.store-pool-size: 2 + # rocksdb.max-sub-compactions: 1 + # storage.block-cache.capacity: "16GB" + # readpool.unified.max-thread-count: 12 + readpool.storage.use-unified-pool: false + readpool.coprocessor.use-unified-pool: true + pd: + schedule.leader-schedule-limit: 4 + schedule.region-schedule-limit: 2048 + schedule.replica-schedule-limit: 64 + +pd_servers: + - host: 10.0.1.4 + # ssh_port: 22 + # name: "pd-1" + # client_port: 2379 + # peer_port: 2380 + # deploy_dir: "/tidb-deploy/pd-2379" + # data_dir: "/tidb-data/pd-2379" + # log_dir: "/tidb-deploy/pd-2379/log" + # numa_node: "0,1" + # # The following configs are used to overwrite the `server_configs.pd` values. + # config: + # schedule.max-merge-region-size: 20 + # schedule.max-merge-region-keys: 200000 + - host: 10.0.1.5 + - host: 10.0.1.6 +tidb_servers: + - host: 10.0.1.1 + # ssh_port: 22 + # port: 4000 + # status_port: 10080 + # deploy_dir: "/tidb-deploy/tidb-4000" + # log_dir: "/tidb-deploy/tidb-4000/log" + # numa_node: "0,1" + # # The following configs are used to overwrite the `server_configs.tidb` values. + # config: + # log.slow-query-file: tidb-slow-overwrited.log + - host: 10.0.1.2 + - host: 10.0.1.3 +tikv_servers: + - host: 10.0.1.7 + # ssh_port: 22 + # port: 20160 + # status_port: 20180 + # deploy_dir: "/tidb-deploy/tikv-20160" + # data_dir: "/tidb-data/tikv-20160" + # log_dir: "/tidb-deploy/tikv-20160/log" + # numa_node: "0,1" + # # The following configs are used to overwrite the `server_configs.tikv` values. + # config: + # server.grpc-concurrency: 4 + # server.labels: { zone: "zone1", dc: "dc1", host: "host1" } + - host: 10.0.1.8 + - host: 10.0.1.9 + +pump_servers: + - host: 10.0.1.1 + ssh_port: 22 + port: 8250 + deploy_dir: "/tidb-deploy/pump-8250" + data_dir: "/tidb-data/pump-8250" + # The following configs are used to overwrite the `server_configs.pump` values. + config: + gc: 7 + - host: 10.0.1.2 + ssh_port: 22 + port: 8250 + deploy_dir: "/tidb-deploy/pump-8250" + data_dir: "/tidb-data/pump-8250" + # The following configs are used to overwrite the `server_configs.pump` values. 
+ config: + gc: 7 + - host: 10.0.1.3 + ssh_port: 22 + port: 8250 + deploy_dir: "/tidb-deploy/pump-8250" + data_dir: "/tidb-data/pump-8250" + # The following configs are used to overwrite the `server_configs.pump` values. + config: + gc: 7 +drainer_servers: + - host: 10.0.1.12 + port: 8249 + deploy_dir: "/tidb-deploy/drainer-8249" + data_dir: "/tidb-data/drainer-8249" + # If drainer doesn't have a checkpoint, use initial commitTS as the initial checkpoint. + # Will get a latest timestamp from pd if commit_ts is set to -1 (the default value). + commit_ts: -1 + # The following configs are used to overwrite the `server_configs.drainer` values. + config: + syncer.db-type: "tidb" + syncer.to.host: "10.0.1.12" + syncer.to.user: "root" + syncer.to.password: "" + syncer.to.port: 4000 + syncer.to.checkpoint: + schema: "tidb_binlog" + type: "tidb" + host: "10.0.1.14" + user: "root" + password: "123" + port: 4000 + - host: 10.0.1.13 + port: 8249 + deploy_dir: "/tidb-deploy/drainer-8249" + data_dir: "/tidb-data/drainer-8249" + # If Drainer does not have a checkpoint, use the initial commitTS as the initial checkpoint. + # If commit_ts is set to -1 (the default value), you will get a latest timestamp from PD. + commit_ts: -1 + # The following configurations are used to overwrite the `server_configs.drainer` values. + config: + syncer.db-type: "kafka" + syncer.replicate-do-db: + - db1 + - db2 + syncer.to.kafka-addrs: "10.0.1.20:9092,10.0.1.21:9092,10.0.1.22:9092" + syncer.to.kafka-version: "2.4.0" + syncer.to.topic-name: "asyouwish" + +monitoring_servers: + - host: 10.0.1.10 + # ssh_port: 22 + # port: 9090 + # deploy_dir: "/tidb-deploy/prometheus-8249" + # data_dir: "/tidb-data/prometheus-8249" + # log_dir: "/tidb-deploy/prometheus-8249/log" + +grafana_servers: + - host: 10.0.1.10 + # port: 3000 + # deploy_dir: /tidb-deploy/grafana-3000 + +alertmanager_servers: + - host: 10.0.1.10 + # ssh_port: 22 + # web_port: 9093 + # cluster_port: 9094 + # deploy_dir: "/tidb-deploy/alertmanager-9093" + # data_dir: "/tidb-data/alertmanager-9093" + # log_dir: "/tidb-deploy/alertmanager-9093/log" +``` + +
以上 TiDB 集群拓扑文件中,详细的配置项说明见[通过 TiUP 部署 TiDB 集群的拓扑文件配置](/tiup/tiup-cluster-topology-reference.md)。 diff --git a/tiflash-deployment-topology.md b/tiflash-deployment-topology.md index 331d06ab30f5..f10e45306158 100644 --- a/tiflash-deployment-topology.md +++ b/tiflash-deployment-topology.md @@ -20,9 +20,374 @@ aliases: ['/docs-cn/dev/tiflash-deployment-topology/'] ### 拓扑模版 -- [简单 TiFlash 配置模版](https://github.com/pingcap/docs-cn/blob/master/config-templates/simple-tiflash.yaml) +
+简单 TiFlash 配置模版 -- [详细 TiFlash 配置模版](https://github.com/pingcap/docs-cn/blob/master/config-templates/complex-tiflash.yaml) +```yaml +# # Global variables are applied to all deployments and used as the default value of +# # the deployments if a specific deployment value is missing. +global: + user: "tidb" + ssh_port: 22 + deploy_dir: "/tidb-deploy" + data_dir: "/tidb-data" + +server_configs: + pd: + replication.enable-placement-rules: true + +pd_servers: + - host: 10.0.1.4 + - host: 10.0.1.5 + - host: 10.0.1.6 + +tidb_servers: + - host: 10.0.1.7 + - host: 10.0.1.8 + - host: 10.0.1.9 + +tikv_servers: + - host: 10.0.1.1 + - host: 10.0.1.2 + - host: 10.0.1.3 + +tiflash_servers: + - host: 10.0.1.11 + data_dir: /tidb-data/tiflash-9000 + deploy_dir: /tidb-deploy/tiflash-9000 + +monitoring_servers: + - host: 10.0.1.10 + +grafana_servers: + - host: 10.0.1.10 + +alertmanager_servers: + - host: 10.0.1.10 +``` + +
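+
+The simple template above gives the TiFlash node a single data directory. On hosts with several disks, `storage.main.dir` (and optionally `storage.latest.dir`) can be set in the instance-level `config`, in which case it takes precedence over `data_dir`. The sketch below borrows the values that appear, commented out, in the detailed template that follows; the disk paths and capacities are assumptions:
+
+```yaml
+tiflash_servers:
+  - host: 10.0.1.11
+    deploy_dir: /tidb-deploy/tiflash-9000
+    config:
+      storage.main.dir: [ "/nvme_ssd0_512/tiflash", "/nvme_ssd1_512/tiflash" ]
+      storage.main.capacity: [ 536870912000, 536870912000 ]
+```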
+ +
+详细 TiFlash 配置模版 + +```yaml +# # Global variables are applied to all deployments and used as the default value of +# # the deployments if a specific deployment value is missing. +global: + user: "tidb" + ssh_port: 22 + deploy_dir: "/tidb-deploy" + data_dir: "/tidb-data" + +# # Monitored variables are applied to all the machines. +monitored: + node_exporter_port: 9100 + blackbox_exporter_port: 9115 + # deploy_dir: "/tidb-deploy/monitored-9100" + # data_dir: "/tidb-data/monitored-9100" + # log_dir: "/tidb-deploy/monitored-9100/log" + +# # Server configs are used to specify the runtime configuration of TiDB components. +# # All configuration items can be found in TiDB docs: +# # - TiDB: https://docs.pingcap.com/zh/tidb/stable/tidb-configuration-file +# # - TiKV: https://docs.pingcap.com/zh/tidb/stable/tikv-configuration-file +# # - PD: https://docs.pingcap.com/zh/tidb/stable/pd-configuration-file +# # All configuration items use points to represent the hierarchy, e.g: +# # readpool.storage.use-unified-pool +# # +# # You can overwrite this configuration via the instance-level `config` field. + +server_configs: + tidb: + log.slow-threshold: 300 + tikv: + # server.grpc-concurrency: 4 + # raftstore.apply-pool-size: 2 + # raftstore.store-pool-size: 2 + # rocksdb.max-sub-compactions: 1 + # storage.block-cache.capacity: "16GB" + # readpool.unified.max-thread-count: 12 + readpool.storage.use-unified-pool: false + readpool.coprocessor.use-unified-pool: true + pd: + schedule.leader-schedule-limit: 4 + schedule.region-schedule-limit: 2048 + schedule.replica-schedule-limit: 64 + replication.enable-placement-rules: true + tiflash: + # Maximum memory usage for processing a single query. Zero means unlimited. + profiles.default.max_memory_usage: 0 + # Maximum memory usage for processing all concurrently running queries on the server. Zero means unlimited. + profiles.default.max_memory_usage_for_all_queries: 0 + tiflash-learner: + # The allowable number of threads in the pool that flushes Raft data to storage. + raftstore.apply-pool-size: 4 + # The allowable number of threads that process Raft, which is the size of the Raftstore thread pool. + raftstore.store-pool-size: 4 +pd_servers: + - host: 10.0.1.4 + # ssh_port: 22 + # name: "pd-1" + # client_port: 2379 + # peer_port: 2380 + # deploy_dir: "/tidb-deploy/pd-2379" + # data_dir: "/tidb-data/pd-2379" + # log_dir: "/tidb-deploy/pd-2379/log" + # numa_node: "0,1" + # # The following configs are used to overwrite the `server_configs.pd` values. + # config: + # schedule.max-merge-region-size: 20 + # schedule.max-merge-region-keys: 200000 + - host: 10.0.1.5 + - host: 10.0.1.6 +tidb_servers: + - host: 10.0.1.7 + # ssh_port: 22 + # port: 4000 + # status_port: 10080 + # deploy_dir: "/tidb-deploy/tidb-4000" + # log_dir: "/tidb-deploy/tidb-4000/log" + # numa_node: "0,1" + # # The following configs are used to overwrite the `server_configs.tidb` values. + # config: + # log.slow-query-file: tidb-slow-overwrited.log + - host: 10.0.1.8 + - host: 10.0.1.9 +tikv_servers: + - host: 10.0.1.1 + # ssh_port: 22 + # port: 20160 + # status_port: 20180 + # deploy_dir: "/tidb-deploy/tikv-20160" + # data_dir: "/tidb-data/tikv-20160" + # log_dir: "/tidb-deploy/tikv-20160/log" + # numa_node: "0,1" + # # The following configs are used to overwrite the `server_configs.tikv` values. 
+    # config:
+    #   server.grpc-concurrency: 4
+    #   server.labels:
+    #     zone: "zone1"
+    #     dc: "dc1"
+    #     host: "host1"
+  - host: 10.0.1.2
+  - host: 10.0.1.3
+
+tiflash_servers:
+  - host: 10.0.1.11
+    # ssh_port: 22
+    # tcp_port: 9000
+    # flash_service_port: 3930
+    # flash_proxy_port: 20170
+    # flash_proxy_status_port: 20292
+    # metrics_port: 8234
+    # deploy_dir: "/tidb-deploy/tiflash-9000"
+    ## The `data_dir` will be overwritten if you define `storage.main.dir` configurations in the `config` section.
+    # data_dir: "/tidb-data/tiflash-9000"
+    # log_dir: "/tidb-deploy/tiflash-9000/log"
+    # numa_node: "0,1"
+    # # The following configs are used to overwrite the `server_configs.tiflash` values.
+    # config:
+    #   logger.level: "info"
+    #   ## Multi-disk deployment introduced in v4.0.9
+    #   ## Check https://docs.pingcap.com/tidb/stable/tiflash-configuration#multi-disk-deployment for more details.
+    #   ## Example1:
+    #   # storage.main.dir: [ "/nvme_ssd0_512/tiflash", "/nvme_ssd1_512/tiflash" ]
+    #   # storage.main.capacity: [ 536870912000, 536870912000 ]
+    #   ## Example2:
+    #   # storage.main.dir: [ "/sata_ssd0_512/tiflash", "/sata_ssd1_512/tiflash", "/sata_ssd2_512/tiflash" ]
+    #   # storage.latest.dir: [ "/nvme_ssd0_150/tiflash" ]
+    #   # storage.main.capacity: [ 536870912000, 536870912000, 536870912000 ]
+    #   # storage.latest.capacity: [ 161061273600 ]
+    # learner_config:
+    #   log-level: "info"
+    #   server.labels:
+    #     zone: "zone2"
+    #     dc: "dc2"
+    #     host: "host2"
+  # - host: 10.0.1.12
+  # - host: 10.0.1.13
+
+monitoring_servers:
+  - host: 10.0.1.10
+    # ssh_port: 22
+    # port: 9090
+    # deploy_dir: "/tidb-deploy/prometheus-8249"
+    # data_dir: "/tidb-data/prometheus-8249"
+    # log_dir: "/tidb-deploy/prometheus-8249/log"
+
+grafana_servers:
+  - host: 10.0.1.10
+    # port: 3000
+    # deploy_dir: /tidb-deploy/grafana-3000
+
+alertmanager_servers:
+  - host: 10.0.1.10
+    # ssh_port: 22
+    # web_port: 9093
+    # cluster_port: 9094
+    # deploy_dir: "/tidb-deploy/alertmanager-9093"
+    # data_dir: "/tidb-data/alertmanager-9093"
+    # log_dir: "/tidb-deploy/alertmanager-9093/log"
+```
+
+
以上 TiDB 集群拓扑文件中,详细的配置项说明见[通过 TiUP 部署 TiDB 集群的拓扑文件配置](/tiup/tiup-cluster-topology-reference.md#tiflash_servers)。 diff --git a/tiflash/tiflash-configuration.md b/tiflash/tiflash-configuration.md index df7aa7f26c71..7903f6a2956c 100644 --- a/tiflash/tiflash-configuration.md +++ b/tiflash/tiflash-configuration.md @@ -296,7 +296,7 @@ delta_index_cache_size = 0 ### 多盘部署 -TiFlash 支持单节点多盘部署。如果你的部署节点上有多块硬盘,可以通过以下的方式配置参数,提高节点的硬盘 I/O 利用率。TiUP 中参数配置格式参照[详细 TiFlash 配置模版](https://github.com/pingcap/docs-cn/blob/master/config-templates/complex-tiflash.yaml)。 +TiFlash 支持单节点多盘部署。如果你的部署节点上有多块硬盘,可以通过以下的方式配置参数,提高节点的硬盘 I/O 利用率。TiUP 中参数配置格式参照[详细 TiFlash 配置模版](/tiflash-deployment-topology.md#拓扑模版)。 #### TiDB 集群版本低于 v4.0.9 diff --git a/tispark-deployment-topology.md b/tispark-deployment-topology.md index 3db6943e9904..6011059e1534 100644 --- a/tispark-deployment-topology.md +++ b/tispark-deployment-topology.md @@ -26,9 +26,216 @@ aliases: ['/docs-cn/dev/tispark-deployment-topology/'] ### 拓扑模版 -[简单 TiSpark 配置模板](https://github.com/pingcap/docs-cn/blob/master/config-templates/simple-tispark.yaml) +
+简单 TiSpark 配置模板 -[详细 TiSpark 配置模板](https://github.com/pingcap/docs-cn/blob/master/config-templates/complex-tispark.yaml) +```yaml +# # Global variables are applied to all deployments and used as the default value of +# # the deployments if a specific deployment value is missing. +global: + user: "tidb" + ssh_port: 22 + deploy_dir: "/tidb-deploy" + data_dir: "/tidb-data" + +pd_servers: + - host: 10.0.1.4 + - host: 10.0.1.5 + - host: 10.0.1.6 + +tidb_servers: + - host: 10.0.1.1 + - host: 10.0.1.2 + - host: 10.0.1.3 + +tikv_servers: + - host: 10.0.1.7 + - host: 10.0.1.8 + - host: 10.0.1.9 + + +# NOTE: TiSpark support is an experimental feature, it's not recommend to be used in +# production at present. +# To use TiSpark, you need to manually install Java Runtime Environment (JRE) 8 on the +# host, see the OpenJDK doc for a reference: https://openjdk.java.net/install/ +# NOTE: Only 1 master node is supported for now +tispark_masters: + - host: 10.0.1.21 + +# NOTE: multiple worker nodes on the same host is not supported by Spark +tispark_workers: + - host: 10.0.1.22 + - host: 10.0.1.23 + +monitoring_servers: + - host: 10.0.1.10 + +grafana_servers: + - host: 10.0.1.10 + +alertmanager_servers: + - host: 10.0.1.10 +``` + +
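+
+The simple template assumes JRE 8 is already installed on the Spark hosts and keeps Spark defaults. If the JRE lives in a non-standard location, or the master needs explicit resource settings, the instance-level fields can be used as in this sketch (the path and sizes are assumptions; the detailed template below lists the commented options in full):
+
+```yaml
+tispark_masters:
+  - host: 10.0.1.21
+    java_home: "/usr/local/bin/java-1.8.0"
+    spark_config:
+      spark.driver.memory: "2g"
+    spark_env:
+      SPARK_EXECUTOR_MEMORY: "10g"
+```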
+ +
+详细 TiSpark 配置模板 + +```yaml +# # Global variables are applied to all deployments and used as the default value of +# # the deployments if a specific deployment value is missing. +global: + user: "tidb" + ssh_port: 22 + deploy_dir: "/tidb-deploy" + data_dir: "/tidb-data" + +# # Monitored variables are applied to all the machines. +monitored: + node_exporter_port: 9100 + blackbox_exporter_port: 9115 + # deploy_dir: "/tidb-deploy/monitored-9100" + # data_dir: "/tidb-data/monitored-9100" + # log_dir: "/tidb-deploy/monitored-9100/log" + +# # Server configs are used to specify the runtime configuration of TiDB components. +# # All configuration items can be found in TiDB docs: +# # - TiDB: https://docs.pingcap.com/zh/tidb/stable/tidb-configuration-file +# # - TiKV: https://docs.pingcap.com/zh/tidb/stable/tikv-configuration-file +# # - PD: https://docs.pingcap.com/zh/tidb/stable/pd-configuration-file +# # All configuration items use points to represent the hierarchy, e.g: +# # readpool.storage.use-unified-pool +# # +# # You can overwrite this configuration via the instance-level `config` field. + +server_configs: + tidb: + log.slow-threshold: 300 + tikv: + # server.grpc-concurrency: 4 + # raftstore.apply-pool-size: 2 + # raftstore.store-pool-size: 2 + # rocksdb.max-sub-compactions: 1 + # storage.block-cache.capacity: "16GB" + # readpool.unified.max-thread-count: 12 + readpool.storage.use-unified-pool: false + readpool.coprocessor.use-unified-pool: true + pd: + schedule.leader-schedule-limit: 4 + schedule.region-schedule-limit: 2048 + schedule.replica-schedule-limit: 64 + +pd_servers: + - host: 10.0.1.4 + # ssh_port: 22 + # name: "pd-1" + # client_port: 2379 + # peer_port: 2380 + # deploy_dir: "/tidb-deploy/pd-2379" + # data_dir: "/tidb-data/pd-2379" + # log_dir: "/tidb-deploy/pd-2379/log" + # numa_node: "0,1" + # # The following configs are used to overwrite the `server_configs.pd` values. + # config: + # schedule.max-merge-region-size: 20 + # schedule.max-merge-region-keys: 200000 + - host: 10.0.1.5 + - host: 10.0.1.6 + +tidb_servers: + - host: 10.0.1.1 + # ssh_port: 22 + # port: 4000 + # status_port: 10080 + # deploy_dir: "/tidb-deploy/tidb-4000" + # log_dir: "/tidb-deploy/tidb-4000/log" + # numa_node: "0,1" + # # The following configs are used to overwrite the `server_configs.tidb` values. + # config: + # log.slow-query-file: tidb-slow-overwrited.log + - host: 10.0.1.2 + - host: 10.0.1.3 + +tikv_servers: + - host: 10.0.1.7 + # ssh_port: 22 + # port: 20160 + # status_port: 20180 + # deploy_dir: "/tidb-deploy/tikv-20160" + # data_dir: "/tidb-data/tikv-20160" + # log_dir: "/tidb-deploy/tikv-20160/log" + # numa_node: "0,1" + # # The following configs are used to overwrite the `server_configs.tikv` values. + # config: + # server.grpc-concurrency: 4 + # server.labels: { zone: "zone1", dc: "dc1", host: "host1" } + + - host: 10.0.1.8 + - host: 10.0.1.9 + +# NOTE: TiSpark support is an experimental feature, it's not recommend to be used in +# production at present. +# To use TiSpark, you need to manually install Java Runtime Environment (JRE) 8 on the +# host, see the OpenJDK doc for a reference: https://openjdk.java.net/install/ +# If you have already installed JRE 1.8 at a location other than the default of system's +# package management system, you may use the "java_home" field to set the JAVA_HOME variable. 
+# NOTE: Only 1 master node is supported for now +tispark_masters: + - host: 10.0.1.21 + # ssh_port: 22 + # port: 7077 + # web_port: 8080 + # deploy_dir: "/tidb-deploy/tispark-master-7077" + # java_home: "/usr/local/bin/java-1.8.0" + # spark_config: + # spark.driver.memory: "2g" + # spark.eventLog.enabled: "False" + # spark.tispark.grpc.framesize: 268435456 + # spark.tispark.grpc.timeout_in_sec: 100 + # spark.tispark.meta.reload_period_in_sec: 60 + # spark.tispark.request.command.priority: "Low" + # spark.tispark.table.scan_concurrency: 256 + # spark_env: + # SPARK_EXECUTOR_CORES: 5 + # SPARK_EXECUTOR_MEMORY: "10g" + # SPARK_WORKER_CORES: 5 + # SPARK_WORKER_MEMORY: "10g" + +# NOTE: multiple worker nodes on the same host is not supported by Spark +tispark_workers: + - host: 10.0.1.22 + # ssh_port: 22 + # port: 7078 + # web_port: 8081 + # deploy_dir: "/tidb-deploy/tispark-worker-7078" + # java_home: "/usr/local/bin/java-1.8.0" + - host: 10.0.1.23 + +monitoring_servers: + - host: 10.0.1.10 + # ssh_port: 22 + # port: 9090 + # deploy_dir: "/tidb-deploy/prometheus-8249" + # data_dir: "/tidb-data/prometheus-8249" + # log_dir: "/tidb-deploy/prometheus-8249/log" + +grafana_servers: + - host: 10.0.1.10 + # port: 3000 + # deploy_dir: /tidb-deploy/grafana-3000 + +alertmanager_servers: + - host: 10.0.1.10 + # ssh_port: 22 + # web_port: 9093 + # cluster_port: 9094 + # deploy_dir: "/tidb-deploy/alertmanager-9093" + # data_dir: "/tidb-data/alertmanager-9093" + # log_dir: "/tidb-deploy/alertmanager-9093/log" +``` + +
以上 TiDB 集群拓扑文件中,详细的配置项说明见[通过 TiUP 部署 TiDB 集群的拓扑文件配置](/tiup/tiup-cluster-topology-reference.md#tispark_masters)。 diff --git a/tiunimanager/tiunimanager-install-and-maintain.md b/tiunimanager/tiunimanager-install-and-maintain.md index 8ea008dee2ca..9d72c697b314 100644 --- a/tiunimanager/tiunimanager-install-and-maintain.md +++ b/tiunimanager/tiunimanager-install-and-maintain.md @@ -83,7 +83,113 @@ TiUniManager 正常运行需要网络环境提供如下端口配置,管理员 ## 拓扑模板 -在线部署 TiUniManager 前,你需要准备好 YAML 拓扑文件。TiUniManager 离线包中包含 YAML 拓扑文件模板。本节介绍用于部署 TiUniManager 拓扑配置模版。见 [TiUniManager 拓扑配置模版 config.yaml(单机版)](https://github.com/pingcap/docs-cn/blob/master/config-templates/em-topology-config.yaml)。 +在线部署 TiUniManager 前,你需要准备好 YAML 拓扑文件。TiUniManager 离线包中包含 YAML 拓扑文件模板。本节介绍用于部署 TiUniManager 拓扑配置模版。 + +
+TiUniManager 拓扑配置模版 config.yaml(单机版) + +```yaml +global: + user: "tidb" + group: "tidb" + ssh_port: 22 + deploy_dir: "/em-deploy" + data_dir: "/em-data" + arch: "amd64" + log_level: "info" + # specify the external elasticsearch address. default is empty + # and elasticsearch_servers can only have one value + external_elasticsearch_url: "" + login_host_user: "" + login_private_key_path: "" + login_public_key_path: "" + +monitored: + node_exporter_port: 4124 + +em_cluster_servers: + - host: {ip} + # port: 4101 + # metrics_port: 4104 + # registry_client_port: 4106 + # registry_peer_port: 4107 + # deploy_dir: "/em-deploy/cluster-server-4101" + # data_dir: "/em-data/cluster-server-41101" + +em_api_servers: + - host: {ip} + # port: 4100 + # metrics_port: 4103 + # deploy_dir: "/em-deploy/openapi-server-4100" + # data_dir: "/em-data/openapi-server-4100" + +em_web_servers: + - host: {ip} + # port: 4180 + # deploy_dir: "/em-deploy/em-web-4180" + # data_dir: "/em-data/em-web-4180" + +em_file_servers: + - host: {ip} + # port: 4102 + # metrics_port: 4105 + # deploy_dir: "/em-deploy/file-server-4102" + # data_dir: "/em-data/file-server-4102" + +elasticsearch_servers: + - host: {ip} + # port: 4108 + # heap_size: "4g" + # deploy_dir: "/em-deploy/elasticsearch-4108" + # data_dir: "/em-data/elasticsearch-4108" + +tracer_servers: + - host: {ip} + # port: 4114 + # web_port: 4115 + # zipkin_thrift_port: 4116 + # compact_thrift_port: 4117 + # binary_thrift_port: 4118 + # agent_serve_port: 4119 + # jaeger_thrift_port: 4120 + # jaeger_host_port: 4121 + # collector_port: 4122 + # grpc_serve_port: 4123 + # deploy_dir: "/em-deploy/tracer-4114" + # data_dir: "/em-data/tracer-4114" + +kibana_servers: + - host: {ip} + # port: 4109 + # deploy_dir: "/em-deploy/kibana-4109" + # data_dir: "/em-data/kibana-4109" + +monitoring_servers: + - host: {ip} + # port: 4110 + # deploy_dir: "/em-deploy/prometheus-4110" + # data_dir: "/em-data/prometheus-4110" + +alertmanager_servers: + - host: {ip} + # web_port: 4112 + # cluster_port: 4113 + # deploy_dir: "/em-deploy/alertmanager-4112" + # data_dir: "/em-data/alertmanager-4112" + +grafana_servers: + - host: {ip} + # port: 4111 + # deploy_dir: "/em-deploy/grafana-4111" + # data_dir: "/em-data/grafana-4111" + +filebeat_servers: + - host: {ip} + # deploy_dir: "/em-deploy/filebeat-0" + # data_dir: "/em-data/filebeat-0" +``` + +
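+
+The `global` section of the template above already reserves the fields used when the TiUniManager control machine logs in to TiDB hosts with a dedicated user and key pair (`login_host_user`, `login_private_key_path`, `login_public_key_path`). The paragraph that follows describes when these are required; the sketch below only illustrates how they might be filled in, and the user name and key paths are assumptions:
+
+```yaml
+global:
+  user: "tidb"
+  group: "tidb"
+  ssh_port: 22
+  login_host_user: "tidb"
+  login_private_key_path: "/home/tidb/.ssh/id_rsa"
+  login_public_key_path: "/home/tidb/.ssh/id_rsa.pub"
+```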
如果 TiUniManager 中控机通过用户名密钥访问 TiDB 资源机,需要参照指定 TiUniManager 中控机登录 TiDB 资源机的用户名和密钥,在配置文件 `config.yaml` 中指定用户名和密钥。 @@ -387,7 +493,7 @@ TiUniManager 正常运行需要网络环境提供如下端口配置,管理员 db_path: "/home/tidb/em.db" ``` - 从备份的元数据中恢复到新集群,流程和部署新集群相同。唯一的区别是在集群 yaml 配置中,`em_cluster_servers` 里增加了 `db_path: "/home/tidb/em.db"`,详细见 [TiUniManager 根据元数据恢复新集群拓扑配置模版 em.yaml(单机版)](https://github.com/pingcap/docs-cn/blob/master/config-templates/em-metadata-restore-config.yaml)。 + 从备份的元数据中恢复到新集群,流程和部署新集群相同。唯一的区别是在集群 yaml 配置中,`em_cluster_servers` 里增加了 `db_path: "/home/tidb/em.db"`,详细见 [TiUniManager 根据元数据恢复新集群拓扑配置模版 em.yaml(单机版)](#拓扑模板)。 ## 修改默认的集群备份路径 @@ -494,7 +600,7 @@ TiUniManager 可以通过 `config.yaml` 文件中全局的 `external_elasticsear 在 `config.yaml` 文件中,`external_elasticsearch_url` 和 `elasticsearch_servers` 都是用来指定 Elasticsearch 部署信息的,不能同时指定。如果指定了 `external_elasticsearch_url` 的值,则需要注释掉 `elasticsearch_servers` 组件的配置信息注释。 -配置格式参考 [TiUniManager 拓扑配置模版 config.yaml(单机版)](https://github.com/pingcap/docs-cn/blob/master/config-templates/em-topology-config.yaml)。 +配置格式参考 [TiUniManager 拓扑配置模版 config.yaml(单机版)](#拓扑模板)。 ## 手动指定部署的 Elasticsearch 组件堆内存大小 @@ -502,7 +608,7 @@ TiUniManager 可以通过 `config.yaml` 文件中全局的 `external_elasticsear `heap_size` 是选填参数,不手工指定会使用默认值。默认值为 `4g`。 -配置格式参考 [TiUniManager 拓扑配置模版 config.yaml(单机版)](https://github.com/pingcap/docs-cn/blob/master/config-templates/em-topology-config.yaml)。 +配置格式参考 [TiUniManager 拓扑配置模版 config.yaml(单机版)](#拓扑模板)。 ## 安装 Kibana 组件(可选)