• PXC5.7(Percona XtraDB Cluster)+HAproxy+Keepalived 集群部署


    Percona-XtraDB-Cluster+Haproxy 搭建集群环境

    环境准备及服务器信息:

    配置防火墙

    firewall-cmd --add-port=3306/tcp --permanent
    firewall-cmd --add-port=4567/tcp --permanent
    firewall-cmd --add-port=4568/tcp --permanent
    firewall-cmd --add-port=4444/tcp --permanent
    firewall-cmd --reload


    安装官方yum源repo配置文件

    yum install http://www.percona.com/downloads/percona-release/redhat/0.1-4/percona-release-0.1-4.noarch.rpm
    

      

    安装pxc

    yum -y install Percona-XtraDB-Cluster-57
    yum install Percona-Server-client-57
    

      

    创建用户和组

    groupadd mysql
    useradd -g mysql -s /sbin/nologin mysql
    

      

    创建目录并赋权

    mkdir -p /data/mysql/{data,binlog,slow,logs}
    touch /data/mysql/logs/mysqld.log
    chown -R mysql:mysql /data/mysql


    修改/etc/my.cnf配置文件

    vi /etc/my.cnf
    增加[mysqld] ##已知BUG
      1 vi /etc/percona-xtradb-cluster.conf.d/mysqld.cnf
      2 替换为:
      3 [client]
      4 port = 3306
      5 socket = /data/mysql/data/mysql.sock
      6 default-character-set = utf8mb4
      7 
      8 [mysqld]
      9 # basic settings #
     10 user = mysql
     11 port=3306
     12 sql_mode = "STRICT_TRANS_TABLES,NO_ENGINE_SUBSTITUTION,NO_ZERO_DATE,NO_ZERO_IN_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER"
     13 autocommit = 1
     14 server-id=163
     15 character_set_server=utf8mb4
     16 init_connect='SET NAMES utf8'
     17 transaction_isolation = READ-COMMITTED
     18 lower_case_table_names = 1
     19 explicit_defaults_for_timestamp = 1
     20 max_allowed_packet = 16777216
     21 event_scheduler = 1
     22 datadir = /data/mysql/data
     23 basedir = /var/lib/mysql
     24 pid-file = /data/mysql/data/mysqld.pid
     25 socket = /data/mysql/data/mysql.sock
     26 default-time_zone = '+8:00'
     27 
     28 # connection #
     29 interactive_timeout = 1800
     30 wait_timeout = 1800
     31 lock_wait_timeout = 1800
     32 skip_name_resolve = 1
     33 max_connections = 5000
     34 max_connect_errors = 1000000
     35 
     36 # table cache performance settings
     37 table_open_cache = 4096
     38 table_definition_cache = 4096
     39 table_open_cache_instances = 128
     40 
     41 # session memory settings #
     42 read_buffer_size = 5M
     43 read_rnd_buffer_size = 10M
     44 sort_buffer_size = 10M
     45 tmp_table_size = 25M
     46 join_buffer_size = 40M
     47 thread_cache_size = 20M
     48 
     49 # log settings #
     50 log_error = /data/mysql/logs/mysqld.log
     51 slow_query_log_file = /data/mysql/slow/slow.log
     52 log-bin= /data/mysql/binlog/mysql-bin
     53 relay_log = mysql-relay-bin
     54 general_log_file= general.log
     55 
     56 slow_query_log = 1
     57 log_queries_not_using_indexes = 1
     58 log_slow_admin_statements = 1
     59 log_slow_slave_statements = 1
     60 log_throttle_queries_not_using_indexes = 10
     61 long_query_time = 1
     62 min_examined_row_limit = 100
     63 binlog-rows-query-log-events = 1
     64 log-bin-trust-function-creators = 1
     65 expire-logs-days = 7
     66 log-slave-updates = 1
     67 
     68 # innodb settings #
     69 innodb_page_size = 16384
     70 innodb_buffer_pool_size = 256M
     71 innodb_buffer_pool_instances = 8
     72 innodb_buffer_pool_load_at_startup = 1
     73 innodb_buffer_pool_dump_at_shutdown = 1
     74 innodb_lru_scan_depth = 4096
     75 innodb_lock_wait_timeout = 5
     76 innodb_io_capacity = 10000
     77 innodb_io_capacity_max = 20000
     78 innodb_flush_method = O_DIRECT
     79 innodb_file_format = Barracuda
     80 innodb_file_format_max = Barracuda
     81 
     82 #undo
     83 innodb_undo_directory = /data/mysql/data
     84 innodb_undo_logs = 128
     85 innodb_undo_tablespaces = 3
     86 
     87 #redo
     88 innodb_log_group_home_dir = /data/mysql/data
     89 innodb_log_file_size = 10M
     90 innodb_log_files_in_group = 2
     91 
     92 innodb_flush_neighbors = 0
     93 innodb_log_buffer_size = 16384
     94 innodb_purge_threads = 4
     95 innodb_large_prefix = 1
     96 innodb_thread_concurrency = 64
     97 innodb_print_all_deadlocks = 1
     98 innodb_strict_mode = 1
     99 innodb_sort_buffer_size = 16384
    100 innodb_write_io_threads = 16
    101 innodb_read_io_threads = 16 
    102 innodb_file_per_table = 1
    103 innodb_stats_persistent_sample_pages = 64
    104 innodb_autoinc_lock_mode = 2
    105 innodb_online_alter_log_max_size=100M
    106 innodb_open_files=4096
    107 
    108 # replication settings #
    109 master_info_repository = TABLE
    110 relay_log_info_repository = TABLE
    111 sync_binlog = 1
    112 gtid_mode = on
    113 enforce_gtid_consistency = 1
    114 log_slave_updates
    115 binlog_format = ROW
    116 binlog_rows_query_log_events = 1
    117 relay_log = relay.log
    118 relay_log_recovery = 1
    119 slave_skip_errors = ddl_exist_errors
    120 slave-rows-search-algorithms = 'INDEX_SCAN,HASH_SCAN'
    121 
    122 [mysqld-5.6]
    123 # metalock performance settings
    124 metadata_locks_hash_instances=64
    125 
    126 [mysqld-5.7]
    127 # new innodb settings #
    128 loose_innodb_numa_interleave=1
    129 innodb_buffer_pool_dump_pct = 40
    130 innodb_page_cleaners = 16
    131 innodb_undo_log_truncate = 1
    132 innodb_max_undo_log_size = 100M #2G
    133 innodb_purge_rseg_truncate_frequency = 128
    134 # new replication settings #
    135 slave-parallel-type = LOGICAL_CLOCK
    136 slave-parallel-workers = 16
    137 slave_preserve_commit_order=1
    138 slave_transaction_retries=128
    139 # other change settings #
    140 binlog_gtid_simple_recovery=1
    141 log_timestamps=system
    142 show_compatibility_56=on
    143 
    144 # Disabling symbolic-links is recommended to prevent assorted security risks
    145 symbolic-links=0
    View Code
    vi /etc/percona-xtradb-cluster.conf.d/mysqld_safe.cnf 
    替换pid-file、socket为:
    pid-file = /data/mysql/data/mysqld.pid
    socket = /data/mysql/data/mysql.sock
    
    vi /etc/percona-xtradb-cluster.conf.d/wsrep.cnf 
    修改:
    wsrep_cluster_address=gcomm://192.168.253.28,192.168.253.29,192.168.253.30 ##根据实际修改
    wsrep_retry_autocommit=1
    wsrep_auto_increment_control=1
    wsrep_node_name =pxc-linux-29 ##根据实际修改
    wsrep_node_address=192.168.253.29 ##根据实际修改
    wsrep_sst_method=xtrabackup-v2
    wsrep_cluster_name=test-pxc
    wsrep_sst_auth="sstuser:s3cretPass" ##根据实际修改
    

    第一个节点启动

    systemctl start mysql@bootstrap

    备注:谨记,只要是启动集群的第一个Node(首次搭建集群或者集群全部关闭),都要用此命令

    修改密码

    mysql5.7版本日志均在error.log 里面生成
    grep "temporary password" /data/mysql/logs/mysqld.log
    
    使用改密码登陆MySQL,修改成自己想要的密码
    mysql> alter user 'root'@'localhost' identified by 'abc123';
    
    创建SST同步用户
    mysql> GRANT PROCESS,RELOAD,LOCK TABLES,REPLICATION CLIENT ON *.* TO 'sstuser'@'192.168.%.%' IDENTIFIED BY 's3cretPass';
    mysql> flush privileges;
    

     

    其它节点启动

    systemctl start mysql

    备注:谨记,只要集群有一个Node启动,其余节点都是用此命令


    Haproxy负载均衡

    Haproxy是一个反向代理负载均衡解决方案,支持4层和7层模式,提供后端服务器健康检查,非常稳定。淘宝前期也使用Haproxy作为CDN系统负载均衡器

    安装haproxy

    yum -y install haproxy
    在cluster的MySQL上创建用户(一个节点创建,会被复制到其它节点)
    监控用帐号:
    grant usage on *.* to 'pxc-monitor'@'%' identified by 'testpxc';
    
    
    服务测试帐号:
    grant all privileges on *.* to 'zxw'@'%' identified by 'xxwzopop';

    配置haproxy.cfg

     1 vi /etc/haproxy/haproxy.cfg 
     2 #---------------------------------------------------------------------
     3 # Global settings
     4 #---------------------------------------------------------------------
     5 global
     6         log 127.0.0.1 local2
     7         chroot /var/lib/haproxy
     8         pidfile /var/run/haproxy.pid
     9         maxconn 4000
    10         user haproxy
    11         group haproxy
    12         daemon
    13 defaults
    14         mode tcp
    15         log global
    16         option tcplog
    17         option dontlognull
    18         retries 3
    19         timeout http-request 10s
    20         timeout queue 1m
    21         timeout connect 10s
    22         timeout client 1m
    23         timeout server 1m
    24         timeout http-keep-alive 10s
    25         timeout check 10s
    26         maxconn 3000
    27 frontend mysql
    28         bind *:3307
    29         mode tcp
    30         #log global
    31         option tcplog
    32         default_backend mysqlservers
    33 backend mysqlservers
    34         balance leastconn
    35         server dbsrv1 10.10.48.62:3306 check port 9200 rise 1 fall 2 maxconn 300
    36         server dbsrv2 10.10.48.64:3306 check port 9200 rise 1 fall 2 maxconn 300
     37         server dbsrv3 10.10.48.66:3306 check port 9200 rise 1 fall 2 maxconn 300
    38 ## 定义一个监控页面,监听在1080端口,并启用了验证机制
    39 listen stats
    40         mode http
    41         bind 0.0.0.0:8888
    42         stats enable
    43         stats hide-version
    44         stats uri /haproxyadmin?stats
    45         stats realm Haproxy Statistics
    46         stats auth admin:admin
    47         stats admin if TRUE
    View Code

    配置haproxy的日志:

    安装完HAProxy后,默认情况下,HAProxy为了节省读写IO所消耗的性能,默认情况下没有日志输出,一下是开启日志的过程

    yum -y install rsyslog
    
    # vim /etc/rsyslog.conf 
    # 
    # 
    ...........
    $ModLoad imudp
    $UDPServerRun 514 //rsyslog 需要在514端口监听UDP,所以要把这两行前面的注释去掉(取消注释)
    .........
    local2.* /var/log/haproxy.log //和haproxy的配置文件中定义的log level一致
    
    
    systemctl start rsyslog
    

    在PXC 每个mysql节点安装mysql健康状态检查脚本(需要在pxc的每个节点执行)

    脚本拷贝

    # cp /usr/local/mysql/bin/clustercheck /usr/bin/
    # cp /usr/local/mysql/xinetd.d/mysqlchk /etc/xinetd.d/
    
    ps:clustercheck和脚本都是默认值没有修改

    创建mysql用户,用于mysql健康检查(在任一节点即可):

    grant process on *.* to 'clustercheckuser'@'localhost' identified by 'clustercheckpassword!';
    flush privileges;
    
    ps:如不使用clustercheck中默认用户名和密码,将需要修改clustercheck脚本,MYSQL_USERNAME和MYSQL_PASSWORD值
    
    更改用户名和密码(三个节点都得修改)
    
    #vim /usr/bin/clustercheck
    MYSQL_USERNAME="pxc-monitor"
    MYSQL_PASSWORD="testpxc"

    更改/etc/services添加mysqlchk的服务端口号:

    echo 'mysqlchk 9200/tcp # mysqlchk' >> /etc/services

    安装xinetd服务,通过守护进程来管理mysql健康状态检查脚本

    yum -y install xinetd ###非常重要
    systemctl enable xinetd
    systemctl start xinetd

    clustercheck脚本测试

    /usr/bin/clustercheck
    
    HTTP/1.1 200 OK
    Content-Type: text/plain
    Connection: close
    Content-Length: 40
    
    Percona XtraDB Cluster Node is synced.
    
    ps:要保证状态为200,否则检测不通过,可能是mysql服务不正常,或者环境不对致使haproxy无法使用mysql
    haproxy如何侦测 MySQL Server 是否存活,靠着就是 9200 port,透过 Http check 方式,让 HAProxy 知道 PXC 状态
    

    启动haproxy

    #启动命令
    haproxy -f /etc/haproxy/haproxy.cfg

    #检查后台进程 ps -ef |grep haproxy haproxy 9754 0 0 11:29 ? 00:00:00 haproxy -f /etc/haproxy/haproxy.cfg root 9823 74 0 11:30 ? 00:00:00 grep --color=auto haproxy
    #检查端口情况 netstat -nlap |grep haproxy tcp 0 0 0.0.0.0:3307 0.0.0.0:* LISTEN 9754/haproxy tcp 0 0 0.0.0.0:8088 0.0.0.0:* LISTEN 9754/haproxy udp 0 0 0.0.0.0:59349 0.0.0.0:* 9754/haproxy unix 2 [ ACC ] STREAM LISTENING 30637572 9754/haproxy /var/lib/haproxy/stats.9753.tmp
    #配置开机自启动 # cp /usr/local/sbin/haproxy /usr/sbin/haproxy cd /opt/soft/haproxy-1.5.3/examples [root@db169 examples]# cp haproxy.init /etc/init.d/haproxy [root@db169 examples]# chmod +x /etc/init.d/haproxy

    haproxy测试

    在mysql pxc创建测试账号:
    
    grant all privileges on *.* to 'zxw'@'%' identified by 'xxwzopop';
    #for i in `seq 1 10`;do mysql -h 192.168.1.163 -P3307 -uzxw -pxxwzopop -e "select @@hostname;";done
    
    注:其实可以只允许haproxy侧的IP访问即可,因用户通过vip访问mysql集群,haproxy根据调度策略使用自己的ip创建与后端mysql服务器的连接。
    

    查看Haproxy状态:

    http://192.168.1.163:8888/haproxyadmin?stats
    输入用户密码(与haproxy.cfg中 stats auth 一致):admin:admin


    用keepalived实现haproxy 的高可用

    安装

    yum install -y gcc openssl-devel popt-devel ipvsadm
    yum -y install kernel kernel-devel* popt popt-devel libssl-dev libnl libnl-devel openssl openssl-* ipvsadm libnfnetlink-devel
    yum install keepalived -y
    yum install MySQL-python -y
    mv /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf.bak

    开启防火墙VRRP

    #开启vrrp 协议  否则会出现双VIP的情况
    
     firewall-cmd --direct --permanent --add-rule ipv4 filter INPUT 0  --protocol vrrp -j ACCEPT
     firewall-cmd --reload

    配置

    vi /etc/keepalived/keepalived.conf
    ! Configuration File for keepalived
    global_defs {
      router_id haproxy_pxc #keepalived组的名称
    }
    vrrp_script chk_haprocy {
      script "/etc/keepalived/check_haproxy.sh" 
      interval 2
      weight 2
    }
    vrrp_instance VI_HAPROXY {
      state MASTER #备份机是BACKUP
      #nopreempt #非抢占模式
      interface eth0
      virtual_router_id 51 #同一集群中该数值要相同,只能从1-255
      priority 100 //备库可以90
      advert_int 1
      authentication {
      auth_type PASS #Auth 用密码,但密码不要超过8位
      auth_pass 1111
    }
    track_script {
      chk_haprocy
    }
    virtual_ipaddress {
      192.168.1.188/24
    }
    }
    vi /etc/keepalived/check_haproxy.sh
    #!/bin/bash
    # Keepalived vrrp_script health check for haproxy.
    # If haproxy is not running, try to restart it; if it still is not
    # running after a grace period, stop keepalived so the VIP fails
    # over to the backup node.

    # Count haproxy processes; quoting the substitution keeps the test
    # valid even if the output were empty.
    running=$(ps -C haproxy --no-header | wc -l)
    if [ "$running" -eq 0 ]; then
      # Attempt to bring haproxy back up.
      /usr/sbin/haproxy -f /etc/haproxy/haproxy.cfg
      # Give haproxy a moment to start before re-checking.
      sleep 3
      if [ "$(ps -C haproxy --no-header | wc -l)" -eq 0 ]; then
        # Restart failed: release the VIP by stopping keepalived.
        /etc/init.d/keepalived stop
      fi
    fi
    
    chmod 755 /etc/keepalived/check_haproxy.sh
    
    # 迁移数据
    
    vi /etc/percona-xtradb-cluster.conf.d/mysqld.cnf
    replicate-do-db=dbtest
    
    导入昨天全量备份数据
    
    change master to master_host='10.200.22.33',master_port=3306,master_user='repl',master_password='mysql',master_log_file='master-bin.000009',master_log_pos=674;
    
    start slave;
    

    PMM监控:

    pmm server:

    docker pull percona/pmm-server:latest
    mkdir -p /data/pmm_data/opt/prometheus/data
    mkdir -p /data/pmm_data/opt/consul-data
    mkdir -p /data/pmm_data/var/lib/mysql
    mkdir -p /data/pmm_data/var/lib/grafana
    docker create -v /data/pmm_data/opt/prometheus/data -v /data/pmm_data/opt/consul-data -v /data/pmm_data/var/lib/mysql -v /data/pmm_data/var/lib/grafana --name pmm-data percona/pmm-server:latest /bin/true
    
    docker run -d 
    -e ORCHESTRATOR_ENABLED=true 
    -e METRICS_RETENTION=720h 
    -e SERVER_USER=admin 
    -e SERVER_PASSWORD=abcd.1234 
    -p 8080:80 
    --net staticnet 
    --ip 192.168.0.11 
    --volumes-from pmm-data 
    --name pmm-server 
    --restart always percona/pmm-server:latest
    

    pmm client:

    yum -y install http://www.percona.com/downloads/percona-release/redhat/0.1-4/percona-release-0.1-4.noarch.rpm
    yum install pmm-client -y
    

    新加入配置

    pmm-admin config --server 10.1.12.114:8080 --server-user admin --server-password abcd.1234
    #添加linux监控
    pmm-admin add linux:metrics
    #创建MySQL监控账号并开启innodb_monitor_enable
    GRANT SELECT, PROCESS, SHOW DATABASES, REPLICATION CLIENT ON *.* TO 'pmm'@'10.1.%' IDENTIFIED BY 'L2iLf#eqISQ613u^';
    set global innodb_monitor_enable=all;
    #添加MySQL监控
    pmm-admin add mysql --user pmm --password 'L2iLf#eqISQ613u^' --host 10.1.21.33 --query-source perfschema
    
    #查看配置信息
    pmm-admin list
    #检查网络
    pmm-admin check-network
    
    [linux:metrics] OK, already monitoring this system.
    [mysql:metrics] OK, now monitoring MySQL metrics using DSN root:***@unix(/data/mysql/data/mysql.sock)
    [mysql:queries] OK, now monitoring MySQL queries from slowlog using DSN root:***@unix(/data/mysql/data/mysql.sock)


    前端访问地址:
    浏览器输入Server IP : http://10.200.22.33:8881
    输入默认的用户名密码:admin

    PMM客户端安装(RPM包):

    1.下载rpm安装

    wget https://www.percona.com/downloads/pmm/1.17.1/binary/redhat/7/x86_64/pmm-client-1.17.1-1.el7.x86_64.rpm

    2.安装

    rpm -ivh pmm-client-1.17.1-1.el7.x86_64.rpm

    3.检查PMM版本

    pmm-admin --version

    4.开通防火墙,并确认网络环境

    firewall-cmd --zone=public --add-port=42000/tcp --permanent
    firewall-cmd --zone=public --add-port=42002/tcp --permanent
    firewall-cmd --reload

    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    1.先备份iptables

    cp /etc/sysconfig/iptables /var/tmp

    2.修改/etc/sysconfig/iptables文件
    vi /etc/sysconfig/iptables
    -A INPUT -p tcp -m state --state NEW -m tcp --dport 42000 -j ACCEPT
    -A INPUT -p udp -m state --state NEW -m udp --dport 42000 -j ACCEPT
    -A INPUT -p tcp -m state --state NEW -m tcp --dport 42002 -j ACCEPT
    -A INPUT -p udp -m state --state NEW -m udp --dport 42002 -j ACCEPT

    3.重启防火墙

    service iptables restart

    5.配置

    a.配置Server

    pmm-admin config --server 10.1.12.114:8080 --server-user admin --server-password abcd.1234 --client-name=bxjc-m-48-12

    b.添加linux监控

    pmm-admin add linux:metrics

    c.添加MySQL监控

    #创建MySQL监控账号并开启innodb_monitor_enable
    GRANT SELECT, PROCESS, SHOW DATABASES, REPLICATION CLIENT ON *.* TO 'pmm'@'10.1.%' IDENTIFIED BY 'L2iLf#eqISQ613u^';
    set global innodb_monitor_enable=all;

    pmm-admin add mysql --user pmm --password 'L2iLf#eqISQ613u^' --host 10.10.48.12 --port=3306 --query-source perfschema

    d.检查配置情况

    pmm-admin list
    pmm-admin check-network

  • 相关阅读:
    关于AE大数据点文件读取生成SHP文件时使用IFeatureBuffer快速提高读取效率
    随手写了个opengl的demo
    render Target sample in UI
    堆排序
    PAT 1040. Longest Symmetric String
    为什么要学习机器学习?如何学习
    输出一个字符串的全排列
    关于Logistic Regression的疑问
    PAT上机考试模板
    如何设置tomcat的默认访问路径
  • 原文地址:https://www.cnblogs.com/EikiXu/p/10394571.html
Copyright © 2020-2023  润新知