@@ -392,6 +392,10 @@ The following part shows how to use Pump and Drainer based on the nodes above.
 
 # the address of the PD cluster nodes
 pd-urls = "http://192.168.0.16:2379,http://192.168.0.15:2379,http://192.168.0.14:2379"
+
+# [storage]
+# Set to true (by default) to guarantee reliability by ensuring binlog data is flushed to the disk
+# sync-log = true
 ```
 
 - The example of starting Pump:
@@ -474,19 +478,20 @@ The following part shows how to use Pump and Drainer based on the nodes above.
 # the path of the log file
 log-file = "drainer.log"
 
+# Drainer compresses the data when it receives the binlog from Pump. The value can be "gzip". If it is not configured, the data is not compressed.
+# compressor = "gzip"
+
 # Syncer Configuration
 [syncer]
+# If this item is set, the specified sql-mode is used to parse DDL statements.
+# sql-mode = "STRICT_TRANS_TABLES,NO_ENGINE_SUBSTITUTION"
 
-# the db filter list ("INFORMATION_SCHEMA,PERFORMANCE_SCHEMA,mysql,test" by default)
-# Does not support the Rename DDL operation on tables of `ignore schemas`.
-ignore-schemas = "INFORMATION_SCHEMA,PERFORMANCE_SCHEMA,mysql"
-
-# the number of SQL statements of a transaction which are output to the downstream database (1 by default)
-txn-batch = 1
-
+# the number of SQL statements of a transaction that are output to the downstream database (20 by default)
+txn-batch = 20
+
 # the number of the concurrency of the downstream for synchronization. The bigger the value,
-# the better throughput performance of the concurrency (1 by default)
-worker-count = 1
+# the better throughput performance of the concurrency (16 by default)
+worker-count = 16
 
 # whether to disable the SQL feature of splitting a single binlog file. If it is set to "true",
 # each binlog file is restored to a single transaction for synchronization based on the order of binlogs.
@@ -497,6 +502,10 @@ The following part shows how to use Pump and Drainer based on the nodes above.
 # Valid values: "mysql", "kafka", "pb", "flash"
 db-type = "mysql"
 
+# the db filter list ("INFORMATION_SCHEMA,PERFORMANCE_SCHEMA,mysql,test" by default)
+# The Rename DDL operation on tables listed in `ignore-schemas` is not supported.
+ignore-schemas = "INFORMATION_SCHEMA,PERFORMANCE_SCHEMA,mysql"
+
 # `replicate-do-db` has priority over `replicate-do-table`. When they have the same `db` name,
 # regular expressions are supported for configuration.
 # The regular expression should start with "~".
@@ -511,6 +520,11 @@ The following part shows how to use Pump and Drainer based on the nodes above.
 # db-name ="test"
 # tbl-name = "~^a.*"
 
+# Ignore the replication of some tables
+# [[syncer.ignore-table]]
+# db-name = "test"
+# tbl-name = "log"
+
 # the server parameters of the downstream database when `db-type` is set to "mysql"
 [syncer.to]
 host = "192.168.0.13"
@@ -527,6 +541,10 @@ The following part shows how to use Pump and Drainer based on the nodes above.
 # zookeeper-addrs = "127.0.0.1:2181"
 # kafka-addrs = "127.0.0.1:9092"
 # kafka-version = "0.8.2.0"
+
+# the topic name of the Kafka cluster that saves the binlog data. The default value is <cluster-id>_obinlog.
+# To run multiple Drainers to replicate data to the same Kafka cluster, you need to set a different `topic-name` for each Drainer.
+# topic-name = ""
 ```
 
 - The example of starting Drainer:
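Pulling the Drainer-side changes together, the new options land in `drainer.toml` roughly as follows. This is a minimal sketch that only collects the settings touched in the hunks above; the filter entries, the downstream port, and the Kafka topic name are illustrative assumptions, and anything not shown keeps its default. The Pump-side `sync-log` switch from the first hunk lives in `pump.toml` under `[storage]` and is not repeated here.

```toml
# drainer.toml sketch: only the options touched by this change, with illustrative values

# top-level option: compress binlog data received from Pump ("gzip", or leave unset for no compression)
compressor = "gzip"

[syncer]
# parse DDL statements with an explicit sql-mode (illustrative value)
sql-mode = "STRICT_TRANS_TABLES,NO_ENGINE_SUBSTITUTION"

# batching and concurrency defaults raised by this change
txn-batch = 20
worker-count = 16

db-type = "mysql"

# the schema filter now sits under [syncer]
ignore-schemas = "INFORMATION_SCHEMA,PERFORMANCE_SCHEMA,mysql"

# skip replication of individual tables
[[syncer.ignore-table]]
db-name = "test"
tbl-name = "log"

[syncer.to]
host = "192.168.0.13"
port = 3306  # assumed MySQL default; the port is not shown in the hunks above

# when db-type = "kafka", give each Drainer its own topic instead (illustrative name):
# topic-name = "my_cluster_obinlog"
```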