Description
1、所用版本皆是最新版本
2、canal-server已按配置实现HA模式
3、canal-adapter如果不用mq,用tcp也已实现HA模式
4、canal-adapter使用rabbitmq,所有的adapter都会去拉rabbitmq的数据,无法像tcp一样单点拉取rabbitmq数据,会存在数据消费的顺序问题,zookeeper在两个位置我都试过配置了不产生效果,望大大修复
5、以下是我的adapter配置:
# canal-adapter application.yml (structure restored: the paste had lost all
# indentation, and the driverClassName value was split by a stray "508F"
# transfer-encoding artifact — rejoined to com.mysql.jdbc.Driver)
server:
  port: 7555
spring:
  jackson:
    date-format: yyyy-MM-dd HH:mm:ss
    time-zone: GMT+8
    default-property-inclusion: non_null

canal.conf:
  canalServerHost:
  # tcp kafka rocketMQ rabbitMQ
  mode: rabbitMQ
  flatMessage: true
  zookeeperHosts: 172.16.61.127:2181,172.16.61.127:2182,172.16.61.127:2183
  syncBatchSize: 1000
  retries: 3
  timeout:
  accessKey:
  secretKey:
  consumerProperties:
    # canal tcp consumer
    # canal.tcp.server.host:
    # canal.tcp.zookeeper.hosts: 172.16.61.127:2181,172.16.61.127:2182,172.16.61.127:2183
    # canal.tcp.batch.size: 500
    # canal.tcp.username: xxx
    # canal.tcp.password: xxxxx
    rabbitmq.host: 172.16.61.127:5672
    rabbitmq.virtual.host: /
    rabbitmq.username: admin
    rabbitmq.password: admin
  canalAdapters:
    - instance: canal_queue  # canal instance Name or mq topic name
      groups:
        - groupId: g1
          outerAdapters:
            - name: logger
            - name: rdb
              key: mysql1
              properties:
                jdbc.driverClassName: com.mysql.jdbc.Driver
                jdbc.url: jdbc:mysql://172.16.61.127:33999/uppcloudtest?useUnicode=true&useSSL=false
                jdbc.username: root
                jdbc.password: Conlin360
                druid.stat.enable: false
                druid.stat.slowSqlMillis: 1000