• Install canal with Docker


    Official documentation address

    Getting started

    Pull the image

    docker pull canal/canal-server
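
    To confirm the image is now available locally, a quick sanity check:

    docker images canal/canal-server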
    

    Download the startup script

    wget https://raw.githubusercontent.com/alibaba/canal/master/docker/run.sh 
    
    #!/bin/bash
    
    function usage() {
        echo "Usage:"
        echo "  run.sh [CONFIG]"
        echo "example 1 :"
        echo "  run.sh -e canal.instance.master.address=127.0.0.1:3306 \"
        echo "         -e canal.instance.dbUsername=canal \"
        echo "         -e canal.instance.dbPassword=canal \"
        echo "         -e canal.instance.connectionCharset=UTF-8 \"
        echo "         -e canal.instance.tsdb.enable=true \"
        echo "         -e canal.instance.gtidon=false \"
        echo "         -e canal.instance.filter.regex=.*\\\..* "
        echo "example 2 :"
        echo "  run.sh -e canal.admin.manager=127.0.0.1:8089 \"
        echo "         -e canal.admin.port=11110 \"
        echo "         -e canal.admin.user=admin \"
        echo "         -e canal.admin.passwd=4ACFE3202A5FF5CF467898FC58AAB1D615029441"
        exit
    }
    
    function check_port() {
        local port=$1
        local TL=$(which telnet)
        if [ -f $TL ]; then
            data=`echo quit | telnet 127.0.0.1 $port| grep -ic connected`
            echo $data
            return
        fi
    
        local NC=$(which nc)
        if [ -f $NC ]; then
            data=`nc -z -w 1 127.0.0.1 $port | grep -ic succeeded`
            echo $data
            return
        fi
        echo "0"
        return
    }
    
    function getMyIp() {
        case "`uname`" in
            Darwin)
             myip=`echo "show State:/Network/Global/IPv4" | scutil | grep PrimaryInterface | awk '{print $3}' | xargs ifconfig | grep inet | grep -v inet6 | awk '{print $2}'`
             ;;
            *)
             myip=`ip route get 1 | awk '{print $NF;exit}'`
             ;;
      esac
      echo $myip
    }
    
    CONFIG=${@:1}
    #VOLUMNS="-v $DATA:/home/admin/canal-server/logs"
    PORTLIST="11110 11111 11112 9100"
    PORTS=""
    for PORT in $PORTLIST ; do
        #exist=`check_port $PORT`
        exist="0"
        if [ "$exist" == "0" ]; then
            PORTS="$PORTS -p $PORT:$PORT"
        else
            echo "port $PORT is used , pls check"
            exit 1
        fi
    done
    
    NET_MODE=""
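    # On Linux (and in the default branch below) the container runs with --net=host,
    # so the -p port mappings collected above are cleared and the ports bind directly on the host.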
    case "`uname`" in
        Darwin)
            bin_abs_path=`cd $(dirname $0); pwd`
            ;;
        Linux)
            bin_abs_path=$(readlink -f $(dirname $0))
            NET_MODE="--net=host"
            PORTS=""
            ;;
        *)
            bin_abs_path=`cd $(dirname $0); pwd`
            NET_MODE="--net=host"
            PORTS=""
            ;;
    esac
    BASE=${bin_abs_path}
    DATA="$BASE/data"
    mkdir -p $DATA
    
    if [ $# -eq 0 ]; then
        usage
    elif [ "$1" == "-h" ] ; then
        usage
    elif [ "$1" == "help" ] ; then
        usage
    fi
    
    
    
    MEMORY="-m 4096m"
    LOCALHOST=`getMyIp`
    cmd="docker run -d -it -h $LOCALHOST $CONFIG --name=canal-server $VOLUMNS $NET_MODE $PORTS $MEMORY canal/canal-server"
    echo $cmd
    eval $cmd
    
    # Start a canal-server that registers itself with the canal-admin console
    sh run.sh -e canal.admin.manager=192.168.33.10:8089 \
              -e canal.admin.port=11110 \
              -e canal.admin.user=admin \
              -e canal.admin.passwd=4ACFE3202A5FF5CF467898FC58AAB1D615029441 \
              -e canal.admin.register.auto=true
    		  
    
    # Create an instance with the destination name "test"
    sh run.sh -e canal.auto.scan=true \
              -e canal.destinations=test \
              -e canal.instance.master.address=192.168.11.134:3306 \
              -e canal.instance.dbUsername=root \
              -e canal.instance.dbPassword=dg123456 \
              -e canal.instance.connectionCharset=UTF-8 \
              -e canal.instance.tsdb.enable=true \
              -e canal.instance.gtidon=false \
              -e canal.instance.filter.regex=dg_directories.tb_area_agent \
              -e canal.mq.topic=queues_area_agent_sync \
              -e canal.mq.exchange=exchange_queues_area_agent_sync \
              -e canal.serverMode=rabbitMQ \
              -e canal.mq.servers=192.168.11.101 \
              -e canal.mq.vhost=/queue_dfml_bigdata \
              -e canal.mq.username=longqh \
              -e canal.mq.password=longqh@123
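
    # Once the container is up, a quick way to check it (a minimal sketch; the
    # per-destination log path assumes the default canal-server layout inside the image):
    docker ps --filter name=canal-server          # container is running
    docker logs -f canal-server                   # server startup log
    docker exec -it canal-server tail -f /home/admin/canal-server/logs/test/test.log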
    

    Log in to the canal-admin console at 192.168.33.10:8089.

    Main configuration (canal.properties)

    The main change is the MQ configuration.

    #################################################
    ######### 		common argument		#############
    #################################################
    # tcp bind ip
    canal.ip =
    # register ip to zookeeper
    canal.register.ip =
    canal.port = 11111
    canal.metrics.pull.port = 11112
    # canal instance user/passwd
    # canal.user = canal
    # canal.passwd = E3619321C1A937C46A0D8BD1DAC39F93B27D4458
    
    # canal admin config
    #canal.admin.manager = 127.0.0.1:8089
    canal.admin.port = 11110
    canal.admin.user = admin
    canal.admin.passwd = 4ACFE3202A5FF5CF467898FC58AAB1D615029441
    
    canal.zkServers =
    # flush data to zk
    canal.zookeeper.flush.period = 1000
    canal.withoutNetty = false
    # tcp, kafka, rocketMQ, rabbitMQ
    canal.serverMode = rabbitMQ
    # flush meta cursor/parse position to file
    canal.file.data.dir = ${canal.conf.dir}
    canal.file.flush.period = 1000
    ## memory store RingBuffer size, should be Math.pow(2,n)
    canal.instance.memory.buffer.size = 16384
    ## memory store RingBuffer used memory unit size , default 1kb
    canal.instance.memory.buffer.memunit = 1024 
    ## memory store gets mode, MEMSIZE or ITEMSIZE
    canal.instance.memory.batch.mode = MEMSIZE
    canal.instance.memory.rawEntry = true
    
    ## detecting config
    canal.instance.detecting.enable = false
    #canal.instance.detecting.sql = insert into retl.xdual values(1,now()) on duplicate key update x=now()
    canal.instance.detecting.sql = select 1
    canal.instance.detecting.interval.time = 3
    canal.instance.detecting.retry.threshold = 3
    canal.instance.detecting.heartbeatHaEnable = false
    
    # maximum supported transaction size; transactions larger than this are split into multiple deliveries
    canal.instance.transaction.size =  1024
    # when reconnecting to a new MySQL master, fall back this many seconds
    canal.instance.fallbackIntervalInSeconds = 60
    
    # network config
    canal.instance.network.receiveBufferSize = 16384
    canal.instance.network.sendBufferSize = 16384
    canal.instance.network.soTimeout = 30
    
    # binlog filter config
    canal.instance.filter.druid.ddl = true
    canal.instance.filter.query.dcl = false
    canal.instance.filter.query.dml = false
    canal.instance.filter.query.ddl = false
    canal.instance.filter.table.error = false
    canal.instance.filter.rows = false
    canal.instance.filter.transaction.entry = false
    
    # binlog format/image check
    canal.instance.binlog.format = ROW,STATEMENT,MIXED 
    canal.instance.binlog.image = FULL,MINIMAL,NOBLOB
    
    # binlog ddl isolation
    canal.instance.get.ddl.isolation = false
    
    # parallel parser config
    canal.instance.parser.parallel = true
    ## concurrent thread number, default 60% available processors, suggest not to exceed Runtime.getRuntime().availableProcessors()
    #canal.instance.parser.parallelThreadSize = 16
    ## disruptor ringbuffer size, must be power of 2
    canal.instance.parser.parallelBufferSize = 256
    
    # table meta tsdb info
    canal.instance.tsdb.enable = true
    canal.instance.tsdb.dir = ${canal.file.data.dir:../conf}/${canal.instance.destination:}
    canal.instance.tsdb.url = jdbc:h2:${canal.instance.tsdb.dir}/h2;CACHE_SIZE=1000;MODE=MYSQL;
    canal.instance.tsdb.dbUsername = canal
    canal.instance.tsdb.dbPassword = canal
    # dump snapshot interval, default 24 hour
    canal.instance.tsdb.snapshot.interval = 24
    # purge snapshot expire , default 360 hour(15 days)
    canal.instance.tsdb.snapshot.expire = 360
    
    #################################################
    ######### 		destinations		#############
    #################################################
    canal.destinations = 
    # conf root dir
    canal.conf.dir = ../conf
    # auto scan instance dir add/remove and start/stop instance
    canal.auto.scan = true
    canal.auto.scan.interval = 5
    
    canal.instance.tsdb.spring.xml = classpath:spring/tsdb/h2-tsdb.xml
    #canal.instance.tsdb.spring.xml = classpath:spring/tsdb/mysql-tsdb.xml
    
    canal.instance.global.mode = manager
    canal.instance.global.lazy = false
    canal.instance.global.manager.address = ${canal.admin.manager}
    #canal.instance.global.spring.xml = classpath:spring/memory-instance.xml
    canal.instance.global.spring.xml = classpath:spring/file-instance.xml
    #canal.instance.global.spring.xml = classpath:spring/default-instance.xml
    
    ##################################################
    ######### 	      MQ Properties      #############
    ##################################################
    # aliyun ak/sk , support rds/mq
    canal.aliyun.accessKey =
    canal.aliyun.secretKey =
    canal.aliyun.uid=
    
    canal.mq.flatMessage = true
    canal.mq.canalBatchSize = 50
    canal.mq.canalGetTimeout = 100
    # Set this value to "cloud" if you want to enable the message trace feature in Aliyun.
    canal.mq.accessChannel = local
    
    canal.mq.database.hash = true
    canal.mq.send.thread.size = 30
    canal.mq.build.thread.size = 8
    
    ##################################################
    ######### 		     Kafka 		     #############
    ##################################################
    kafka.bootstrap.servers = 127.0.0.1:6667
    kafka.acks = all
    kafka.compression.type = none
    kafka.batch.size = 16384
    kafka.linger.ms = 1
    kafka.max.request.size = 1048576
    kafka.buffer.memory = 33554432
    kafka.max.in.flight.requests.per.connection = 1
    kafka.retries = 0
    
    kafka.kerberos.enable = false
    kafka.kerberos.krb5.file = "../conf/kerberos/krb5.conf"
    kafka.kerberos.jaas.file = "../conf/kerberos/jaas.conf"
    
    ##################################################
    ######### 		    RocketMQ	     #############
    ##################################################
    rocketmq.producer.group = test
    rocketmq.enable.message.trace = false
    rocketmq.customized.trace.topic =
    rocketmq.namespace =
    rocketmq.namesrv.addr = 127.0.0.1:9876
    rocketmq.retry.times.when.send.failed = 0
    rocketmq.vip.channel.enabled = false
    
    ##################################################
    ######### 		    RabbitMQ	     #############
    ##################################################
    rabbitmq.host = 192.168.11.122
    rabbitmq.virtual.host = /queue_333
    rabbitmq.exchange = 44444
    rabbitmq.username = 55555
    rabbitmq.password = 444
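
    With canal.serverMode = rabbitMQ, canal publishes its flat JSON messages to the exchange
    and vhost configured above. A quick way to confirm the broker side is reachable is the
    RabbitMQ management API; the sketch below uses the example host, exchange and credentials
    from this config (%2Fqueue_333 is the URL-encoded form of the vhost /queue_333):

    curl -u 55555:444 http://192.168.11.122:15672/api/exchanges/%2Fqueue_333/44444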
    

    Instance configuration file (instance.properties)

    #################################################
    ## mysql serverId , v1.0.26+ will autoGen
    canal.instance.mysql.slaveId=88
    
    # enable gtid use true/false
    canal.instance.gtidon=false
    
    # position info
    canal.instance.master.address=192.168.11.134:3306
    canal.instance.master.journal.name=
    canal.instance.master.position=
    canal.instance.master.timestamp=
    canal.instance.master.gtid=
    
    # rds oss binlog
    canal.instance.rds.accesskey=
    canal.instance.rds.secretkey=
    canal.instance.rds.instanceId=
    
    # table meta tsdb info
    canal.instance.tsdb.enable=true
    #canal.instance.tsdb.url=jdbc:mysql://127.0.0.1:3306/canal_tsdb
    #canal.instance.tsdb.dbUsername=canal
    #canal.instance.tsdb.dbPassword=canal
    
    #canal.instance.standby.address =
    #canal.instance.standby.journal.name =
    #canal.instance.standby.position =
    #canal.instance.standby.timestamp =
    #canal.instance.standby.gtid=
    
    # username/password
    canal.instance.dbUsername=root
    canal.instance.dbPassword=dg123456
    canal.instance.connectionCharset = UTF-8
    # enable druid Decrypt database password
    canal.instance.enableDruid=false
    #canal.instance.pwdPublicKey=MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBALK4BUxdDltRRE5/zXpVEVPUgunvscYFtEip3pmLlhrWpacX7y7GCMo2/JM6LeHmiiNdH1FWgGCpUfircSwlWKUCAwEAAQ==
    
    # table regex
    canal.instance.filter.regex=dg_directories.tb_area_agent_copy1
    # table black regex
    canal.instance.filter.black.regex=
    # table field filter(format: schema1.tableName1:field1/field2,schema2.tableName2:field1/field2)
    #canal.instance.filter.field=test1.t_product:id/subject/keywords,test2.t_company:id/name/contact/ch
    # table field black filter(format: schema1.tableName1:field1/field2,schema2.tableName2:field1/field2)
    #canal.instance.filter.black.field=test1.t_product:subject/product_image,test2.t_company:id/name/contact/ch
    
    # mq config
    canal.mq.topic=queue_brady_test
    
    
    # dynamic topic route by schema or table regex
    #canal.mq.dynamicTopic=mytest1.user,mytest2\..*,.*\..*
    canal.mq.partition=0
    # hash partition config
    #canal.mq.partitionsNum=3
    #canal.mq.partitionHash=test.table:id^name,.*\..*
    #################################################
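
    A simple end-to-end smoke test, as a rough sketch: touch a row in the monitored table and
    then check the RabbitMQ side. It assumes the MySQL instance from this config, the vhost
    from the main configuration above, and that the mysql and rabbitmqctl clients are
    available (the update_time column is hypothetical; adapt it to the real table schema).

    # make a change in the monitored table so canal has something to pick up
    # (update_time is a hypothetical column name)
    mysql -h 192.168.11.134 -uroot -p'dg123456' \
      -e "UPDATE dg_directories.tb_area_agent_copy1 SET update_time = NOW() LIMIT 1;"

    # on the RabbitMQ host, list queues on the configured vhost and look for new messages
    rabbitmqctl list_queues -p /queue_333 name messages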
    
    
  • Original article: https://www.cnblogs.com/brady-wang/p/14472968.html