• 架构第三周作业


    第三周

    一、redis服务配置文件详解

    bind 0.0.0.0 #监听地址,可以用空格隔开写多个监听IP
    
    protected-mode yes #redis3.2之后加入的新特性,在没有设置bind IP和密码的时候,redis只允许本机访问127.0.0.1:6379,不允许远程连接,远程访问时将提示警告信息并拒绝连接
    
    port 6379 #监听端口,默认6379/tcp
    
    tcp-backlog 511 #三次握手的时候server端收到client ack确认号之后的队列值,即全连接队列长度
    
    timeout 0 #客户端和Redis服务端的连接超时时间,默认是0,表示永不超时
    
    tcp-keepalive 300 #tcp 会话保持时间300s
    
    daemonize no #默认no,即直接运行redis-server程序时,不作为守护进程运行,而是以前台方式运行,如果想在后台运行需改成yes,当redis作为守护进程运行的时候,它会写一个 pid 到/var/run/redis.pid 文件
    
    supervised no #和OS相关参数,可设置通过upstart和systemd管理Redis守护进程,centos7后都使用systemd
    
    pidfile /var/run/redis_6379.pid #pid文件路径,可以修改为/apps/redis/run/redis_6379.pid
    
    loglevel notice #日志级别
    
    logfile "/path/redis.log" #日志路径,示例:logfile "/apps/redis/log/redis_6379.log"
    
    databases 16 #设置数据库数量,默认:0-15,共16个库
    
    always-show-logo yes #在启动redis 时是否显示或在日志中记录redis的logo
    
    save 900 1 #在900秒内有1个key内容发生更改,就执行快照机制
    
    save 300 10 #在300秒内有10个key内容发生更改,就执行快照机制
    
    save 60 10000  #60秒内如果有10000个key以上的变化,就自动快照备份
    
    stop-writes-on-bgsave-error yes #默认为yes时,可能会因空间满等原因快照无法保存出错时,会禁止redis写入操作,生产建议为no
     #此项只针对配置文件中的自动save有效
     
    rdbcompression yes #持久化到RDB文件时,是否压缩,"yes"为压缩,"no"则反之
    
    rdbchecksum yes #是否对备份文件开启CRC64校验,默认是开启
    
    dbfilename dump.rdb #快照文件名
    
    dir ./ #快照文件保存路径,示例:dir "/apps/redis/data"
    
    #主从复制相关
    
    # replicaof <masterip> <masterport> #指定复制的master主机地址和端口,5.0版之前的指令为slaveof 
    
    # masterauth <master-password> #指定复制的master主机的密码
    
    replica-serve-stale-data yes #当从库同主库失去连接或者复制正在进行,从机库有两种运行方式:
    1、设置为yes(默认设置),从库会继续响应客户端的读请求,此为建议值
    2、设置为no,除去特定命令外的任何请求都会返回一个错误"SYNC with master in progress"。
    
    replica-read-only yes #是否设置从库只读,建议值为yes,否则主库同步从库时可能会覆盖数据,造成数据丢失
    
    repl-diskless-sync no #是否使用socket方式复制数据(无盘同步),新slave第一次连接master时需要做数据的全量同步,redis server就要从内存dump出新的RDB文件,然后从master传到slave,有两种方式把RDB文件传输给客户端:
    1、基于硬盘(disk-backed):为no时,master创建一个新进程dump生成RDB磁盘文件,RDB完成之后由父进程(即主进程)将RDB文件发送给slaves,此为默认值
    2、基于socket(diskless):master创建一个新进程直接dump RDB至slave的网络socket,不经过主进程和硬盘
    #推荐使用基于硬盘(为no),是因为RDB文件创建后,可以同时传输给更多的slave,但是基于socket(为yes), 新slave连接到master之后得逐个同步数据。只有当磁盘I/O较慢且网络较快时,可用diskless(yes),否则一般建议使用磁盘(no)
    
    repl-diskless-sync-delay 5 #diskless时复制的服务器等待的延迟时间,设置0为关闭,在延迟时间内到达的客户端,会一起通过diskless方式同步数据,但是一旦复制开始,master节点不会再接收新slave的复制请求,直到下一次同步开始才再接收新请求。即无法为延迟时间后到达的新副本提供服务,新副本将排队等待下一次RDB传输,因此服务器会等待一段时间才能让更多副本到达。推荐值:30-60
    
    repl-ping-replica-period 10 #slave根据master指定的时间进行周期性的PING master,用于监测master状态,默认10s
    
    repl-timeout 60 #复制连接的超时时间,需要大于repl-ping-slave-period,否则会经常报超时
    
    repl-disable-tcp-nodelay no #是否在slave套接字发送SYNC之后禁用 TCP_NODELAY,如果选择"yes",Redis将合并多个报文为一个大的报文,从而使用更少数量的包向slaves发送数据,但是将使数据传输到slave上有延迟,Linux内核的默认配置会达到40毫秒,如果 "no" ,数据传输到slave的延迟将会减少,但要使用更多的带宽
    
    repl-backlog-size 512mb #复制缓冲区内存大小,当slave断开连接一段时间后,该缓冲区会累积复制副本数据,因此当slave 重新连接时,通常不需要完全重新同步,只需传递在副本中的断开连接后没有同步的部分数据即可。只有在至少有一个slave连接之后才分配此内存空间,建议建立主从时此值要调大一些或在低峰期配置,否则会导致同步到slave失败
    
    repl-backlog-ttl 3600 #多长时间内master没有slave连接,就清空backlog缓冲区
    
    replica-priority 100 #当master不可用,哨兵Sentinel会根据slave的优先级选举一个master,此值最低的slave会优先当选master,而配置成0,永远不会被选举,一般多个slave都设为一样的值,让其自动选择
    
    #min-replicas-to-write 3 #至少有3个可连接的slave,master才接受写操作
    
    #min-replicas-max-lag 10 #和上面至少3个slave的ping延迟不能超过10秒,否则master也将停止写操作
    
    requirepass foobared #设置redis连接密码,之后需要AUTH pass,如果有特殊符号,用" "引起来,生产建议设置
    
    rename-command #重命名一些高危命令,示例:rename-command FLUSHALL "" 禁用命令
       #示例: rename-command del magedu
    
    maxclients 10000 #Redis最大连接客户端
    
    maxmemory <bytes> #redis使用的最大内存,单位为bytes字节,0为不限制,建议设为物理内存一半,8G内存的计算方式8(G)*1024(MB)*1024(KB)*1024(byte),需要注意的是缓冲区是不计算在maxmemory内,生产中如果不设置此项,可能会导致OOM
    
    appendonly no #是否开启AOF日志记录,默认redis使用的是rdb方式持久化,这种方式在许多应用中已经足够用了,但是redis如果中途宕机,会导致可能有几分钟的数据丢失(取决于dump数据的间隔时间),根据save来策略进行持久化,Append Only File是另一种持久化方式,可以提供更好的持久化特性,Redis会把每次写入的数据在接收后都写入 appendonly.aof 文件,每次启动时Redis都会先把这个文件的数据读入内存里,先忽略RDB文件。默认不启用此功能
    
    appendfilename "appendonly.aof" #文本文件AOF的文件名,存放在dir指令指定的目录中
    
    appendfsync everysec #aof持久化策略的配置
    #no表示由操作系统保证数据同步到磁盘,Linux的默认fsync策略是30秒,最多会丢失30s的数据
    #always表示每次写入都执行fsync,以保证数据同步到磁盘,安全性高,性能较差
    #everysec表示每秒执行一次fsync,可能会导致丢失这1s数据,此为默认值,也生产建议值
    
    #同时在执行bgrewriteaof操作和主进程写aof文件的操作,两者都会操作磁盘,而bgrewriteaof往往会涉及大量磁盘操作,这样就会造成主进程在写aof文件的时候出现阻塞的情形,以下参数实现控制
    no-appendfsync-on-rewrite no #在aof rewrite期间,是否对aof新记录的append暂缓使用文件同步策略,主要考虑磁盘IO开支和请求阻塞时间。
    #默认为no,表示"不暂缓",新的aof记录仍然会被立即同步到磁盘,是最安全的方式,不会丢失数据,但是要忍受阻塞的问题
    #为yes,相当于将appendfsync设置为no,这说明并没有执行磁盘操作,只是写入了缓冲区,因此这样并不会造成阻塞(因为没有竞争磁盘),但是如果这个时候redis挂掉,就会丢失数据。丢失多少数据呢?Linux的默认fsync策略是30秒,最多会丢失30s的数据,但由于yes性能较好而且会避免出现阻塞因此比较推荐
    
    #rewrite 即对aof文件进行整理,将空闲空间回收,从而可以减少恢复数据时间
    
    auto-aof-rewrite-percentage 100 #当Aof log增长超过指定百分比时,重写AOF文件,设置为0表示不自动重写Aof日志,重写是为了使aof体积保持最小,但是还可以确保保存最完整的数据
    
    auto-aof-rewrite-min-size 64mb #触发aof rewrite的最小文件大小
    
    aof-load-truncated yes #是否加载由于某些原因导致的末尾异常的AOF文件(主进程被kill/断电等),建议yes
    
    aof-use-rdb-preamble no #redis4.0新增RDB-AOF混合持久化格式,在开启了这个功能之后,AOF重写产生的文件将同时包含RDB格式的内容和AOF格式的内容,其中RDB格式的内容用于记录已有的数据,而AOF格式的内容则用于记录最近发生了变化的数据,这样Redis就可以同时兼有RDB持久化和AOF持久化的优点(既能够快速地生成重写文件,也能够在出现问题时,快速地载入数据),默认为no,即不启用此功能
    
    lua-time-limit 5000 #lua脚本的最大执行时间,单位为毫秒
    
    cluster-enabled yes #是否开启集群模式,默认不开启,即单机模式
    
    cluster-config-file nodes-6379.conf #由node节点自动生成的集群配置文件名称
    
    cluster-node-timeout 15000 #集群中node节点连接超时时间,单位ms,超过此时间,会踢出集群
    
    cluster-replica-validity-factor 10 #单位为次,在执行故障转移的时候可能有些节点和master断开一段时间导致数据比较旧,这些节点就不适用于选举为master,超过这个时间的就不会被进行故障转移,不能当选master,计算公式:(node-timeout * replica-validity-factor) + repl-ping-replica-period 
    
    cluster-migration-barrier 1 #集群迁移屏障,一个主节点至少保留1个正常工作的从节点;如果某个主节点的从节点全部故障,拥有多余从节点的其他主节点会将多余的从节点迁移给该主节点,成为其新的从节点。
    
    cluster-require-full-coverage yes #集群请求槽位全部覆盖,如果一个主库宕机且没有备库就会出现集群槽位不全,那么yes时redis集群槽位验证不全,就不再对外提供服务(对key赋值时,会出现CLUSTERDOWN The cluster is down的提示,cluster_state:fail,但ping 仍PONG),而no则可以继续使用,但是会出现查询数据查不到的情况(因为有数据丢失)。生产建议为no
    
    cluster-replica-no-failover no #如果为yes,此选项阻止在主服务器发生故障时尝试对其主服务器进行故障转移。 但是,主服务器仍然可以执行手动强制故障转移,一般为no
    
    #Slow log 是 Redis 用来记录超过指定执行时间的日志系统,执行时间不包括与客户端交谈,发送回复等I/O操作,而是实际执行命令所需的时间(在该阶段线程被阻塞并且不能同时为其它请求提供服务),由于slow log 保存在内存里面,读写速度非常快,因此可放心地使用,不必担心因为开启 slow log 而影响Redis 的速度
    
    slowlog-log-slower-than 10000 #以微秒为单位的慢日志记录,为负数会禁用慢日志,为0会记录每个命令操作。默认值为10ms,一般一条命令执行都在微秒级,生产建议设为1ms-10ms之间
    
    slowlog-max-len 128 #最多记录多少条慢日志的保存队列长度,达到此长度后,记录新命令会将最旧的命令从命令队列中删除,以此滚动删除,即,先进先出,队列固定长度,默认128,值偏小,生产建议设为1000以上
    

    二、RDB、AOF详解及优缺点总结

    1.RDB模式优缺点
    1.1.RDB 模式优点
    1.1.1.RDB快照保存了某个时间点的数据,可以通过脚本执行redis指令bgsave(非阻塞,后台执行)或者save(会阻塞写操作,不推荐)命令自定义时间点备份,可以保留多个备份,当出现问题可以恢复到不同时间点的版本,很适合备份,并且此文件格式也支持有不少第三方工具可以进行后续的数据分析。比如: 可以在最近的24小时内,每小时备份一次RDB文件,并且在每个月的每一天,也备份一个RDB文件。这样的话,即使遇上问题,也可以随时将数据集还原到不同的版本。
    
    1.1.2.RDB可以最大化Redis的性能,父进程在保存 RDB文件时唯一要做的就是fork出一个子进程,然后这个子进程就会处理接下来的所有保存工作,父进程无须执行任何磁盘工/0操作。
    
    1.1.3.RDB在大量数据,比如几个G的数据,恢复的速度比AOF的快
    
    1.2.RDB 模式缺点
    1.2.1.不能实时保存数据,可能会丢失自上一次执行RDB备份到当前的内存数据
    如果你需要尽量避免在服务器故障时丢失数据,那么RDB不适合你。虽然Redis允许你设置不同的保存点(save point)来控制保存RDB文件的频率,但是,因为RDB文件需要保存整个数据集的状态,所以它并不是一个轻松快速的操作。因此一般会超过5分钟以上才保存一次RDB文件。在这种情况下,一旦发生故障停机,你就可能会丢失好几分钟的数据。
    
    1.2.2.当数据量非常大的时候,从父进程fork子进程进行保存至RDB文件时需要一点时间,可能是毫秒或者秒,取决于磁盘IO性能
    
    1.2.3.在数据集比较庞大时,fork()可能会非常耗时,造成服务器在一定时间内停止处理客户端﹔如果数据集非常巨大,并且CPU时间非常紧张的话,那么这种停止时间甚至可能会长达整整一秒或更久。虽然 AOF重写也需要进行fork(),但无论AOF重写的执行间隔有多长,数据的持久性都不会有任何损失
    
    2.AOF模式优缺点
    2.1.AOF 模式优点
    2.1.1.数据安全性相对较高,根据所使用的fsync策略(fsync是同步内存中redis所有已经修改的文件到存储设备),默认是appendfsync everysec,即每秒执行一次 fsync,在这种配置下,Redis 仍然可以保持良好的性能,并且就算发生故障停机,也最多只会丢失一秒钟的数据( fsync会在后台线程执行,所以主线程可以继续努力地处理命令请求)
    
    2.1.2.由于该机制对日志文件的写入操作采用的是append模式,因此在写入过程中不需要seek, 即使出现宕机现象,也不会破坏日志文件中已经存在的内容。然而如果本次操作只是写入了一半数据就出现了系统崩溃问题,不用担心,在Redis下一次启动之前,可以通过 redis-check-aof 工具来解决数据一致性的问题
    
    2.1.3.Redis可以在 AOF文件体积变得过大时,自动地在后台对AOF进行重写,重写后的新AOF文件包含了恢复当前数据集所需的最小命令集合。整个重写操作是绝对安全的,因为Redis在创建新 AOF文件的过程中,append模式不断的将修改数据追加到现有的 AOF文件里面,即使重写过程中发生停机,现有的 AOF文件也不会丢失。而一旦新AOF文件创建完毕,Redis就会从旧AOF文件切换到新AOF文件,并开始新AOF文件进行追加操作。
    
    2.1.4.AOF包含一个格式清晰、易于理解的日志文件用于记录所有的修改操作。事实上,也可以通过该文件完成数据的重建。AOF文件有序地保存了对数据库执行的所有写入操作,这些写入操作以Redis协议的格式保存,因此 AOF文件的内容非常容易被人读懂,对文件进行分析(parse)也很轻松。导出(export)AOF文件也非常简单:举个例子,如果不小心执行了FLUSHALL命令,但只要AOF文件未被重写,那么只要停止服务器,移除 AOF文件末尾的FLUSHALL命令,并重启Redis ,就可以将数据集恢复到FLUSHALL执行之前的状态。
    
    2.2.AOF 模式缺点
    2.2.1.即使有些操作是重复的也会全部记录,AOF 的文件大小要大于 RDB 格式的文件
    2.2.2.AOF 在恢复大数据集时的速度比 RDB 的恢复速度要慢
    2.2.3.根据fsync策略不同,AOF速度可能会慢于RDB
    2.2.4.bug 出现的可能性更多
    
    3.RDB和AOF 的选择
    3.1.如果主要充当缓存功能,或者可以承受数分钟数据的丢失, 通常生产环境一般只需启用RDB即可,此也是默认值
    3.2.如果数据需要持久保存,一点不能丢失,可以选择同时开启RDB和AOF。一般不建议只开启AOF
    

    三、Redis Cluster扩、缩容

    1、Redis Cluster扩容(Redis 5为例)
    	因公司业务发展迅猛,现有的三主三从的redis cluster架构可能无法满足现有业务的并发写入需求,因此公司紧急采购两台服务器10.0.0.68,10.0.0.78,需要将其动态添加到集群当中,但不能影响业务使用,也不能造成数据丢失。
    	注意: 生产环境一般建议master节点为奇数个,比如:3,5,7,以防止脑裂现象
    1.1、#添加两个节点,增加Redis node节点,需要与之前的Redis node版本相同、配置一致,然后分别再启动两台Redis node,应为一主一从。
    
    #配置node7节点
    [root@redis-node7 ~]#dnf -y install redis
    [root@redis-node7 ~]#sed -i.bak -e 's/bind 127.0.0.1/bind 0.0.0.0/' -e 
    '/masterauth/a masterauth 123456' -e '/# requirepass/a requirepass 123456' -e 
    '/# cluster-enabled yes/a cluster-enabled yes' -e '/# cluster-config-file nodes-
    6379.conf/a cluster-config-file nodes-6379.conf' -e '/cluster-require-full-coverage yes/c cluster-require-full-coverage no' /etc/redis.conf
    [root@redis-node7 ~]#systemctl enable --now redis
    #配置node8节点
    [root@redis-node8 ~]#dnf -y install redis
    [root@redis-node8 ~]#sed -i.bak -e 's/bind 127.0.0.1/bind 0.0.0.0/' -e 
    '/masterauth/a masterauth 123456' -e '/# requirepass/a requirepass 123456' -e 
    '/# cluster-enabled yes/a cluster-enabled yes' -e '/# cluster-config-file nodes-
    6379.conf/a cluster-config-file nodes-6379.conf' -e '/cluster-require-full-coverage yes/c cluster-require-full-coverage no' /etc/redis.conf
    [root@redis-node8 ~]#systemctl enable --now redis
    
    1.2、 #添加新的master节点到集群
    #将一台新的主机10.0.0.68加入集群,以下示例中10.0.0.58可以是任意存在的集群节点
    [root@redis-node1 ~]#redis-cli -a 123456 --cluster add-node 10.0.0.68:6379 <当前
    任意集群节点>:6379
    Warning: Using a password with '-a' or '-u' option on the command line interface 
    may not be safe.
    >>> Adding node 10.0.0.68:6379 to cluster 10.0.0.58:6379
    >>> Performing Cluster Check (using node 10.0.0.58:6379)
    S: 9875b50925b4e4f29598e6072e5937f90df9fc71 10.0.0.58:6379
       slots: (0 slots) slave
       replicates d34da8666a6f587283a1c2fca5d13691407f9462
    M: d04e524daec4d8e22bdada7f21a9487c2d3e1057 10.0.0.48:6379
       slots:[5461-10922] (5462 slots) master
       1 additional replica(s)
    M: d34da8666a6f587283a1c2fca5d13691407f9462 10.0.0.28:6379
       slots:[10923-16383] (5461 slots) master
       1 additional replica(s)
    S: 99720241248ff0e4c6fa65c2385e92468b3b5993 10.0.0.18:6379
       slots: (0 slots) slave
       replicates d04e524daec4d8e22bdada7f21a9487c2d3e1057
    S: f9adcfb8f5a037b257af35fa548a26ffbadc852d 10.0.0.38:6379
       slots: (0 slots) slave
       replicates cb028b83f9dc463d732f6e76ca6bbcd469d948a7
    M: cb028b83f9dc463d732f6e76ca6bbcd469d948a7 10.0.0.8:6379
       slots:[0-5460] (5461 slots) master
       1 additional replica(s)
    [OK] All nodes agree about slots configuration.
    >>> Check for open slots...
    >>> Check slots coverage...
    [OK] All 16384 slots covered.
    >>> Send CLUSTER MEET to node 10.0.0.68:6379 to make it join the cluster.
    [OK] New node added correctly.
    #观察到该节点已经加入成功,但此节点上没有slot位,也无从节点,而且新的节点是master
    
    [root@redis-node1 ~]#redis-cli -a 123456 --cluster info 10.0.0.8:6379
    Warning: Using a password with '-a' or '-u' option on the command line interface 
    may not be safe.
    10.0.0.8:6379 (cb028b83...) -> 6672 keys | 5461 slots | 1 slaves.
    10.0.0.68:6379 (d6e2eca6...) -> 0 keys | 0 slots | 0 slaves.
    10.0.0.48:6379 (d04e524d...) -> 6679 keys | 5462 slots | 1 slaves.
    10.0.0.28:6379 (d34da866...) -> 6649 keys | 5461 slots | 1 slaves.
    [OK] 20000 keys in 5 masters.
    1.22 keys per slot on average.
    [root@redis-node1 ~]#redis-cli -a 123456 --cluster check 10.0.0.8:6379
    Warning: Using a password with '-a' or '-u' option on the command line interface 
    may not be safe.
    10.0.0.8:6379 (cb028b83...) -> 6672 keys | 5461 slots | 1 slaves.
    10.0.0.68:6379 (d6e2eca6...) -> 0 keys | 0 slots | 0 slaves.
    10.0.0.48:6379 (d04e524d...) -> 6679 keys | 5462 slots | 1 slaves.
    10.0.0.28:6379 (d34da866...) -> 6649 keys | 5461 slots | 1 slaves.
    [OK] 20000 keys in 5 masters.
    1.22 keys per slot on average.
    >>> Performing Cluster Check (using node 10.0.0.8:6379)
    M: cb028b83f9dc463d732f6e76ca6bbcd469d948a7 10.0.0.8:6379
       slots:[0-5460] (5461 slots) master
       1 additional replica(s)
    M: d6e2eca6b338b717923f64866bd31d42e52edc98 10.0.0.68:6379
       slots: (0 slots) master
    S: 9875b50925b4e4f29598e6072e5937f90df9fc71 10.0.0.58:6379
       slots: (0 slots) slave
       replicates d34da8666a6f587283a1c2fca5d13691407f9462
    S: f9adcfb8f5a037b257af35fa548a26ffbadc852d 10.0.0.38:6379
       slots: (0 slots) slave
       replicates cb028b83f9dc463d732f6e76ca6bbcd469d948a7
    M: d04e524daec4d8e22bdada7f21a9487c2d3e1057 10.0.0.48:6379
       slots:[5461-10922] (5462 slots) master
       1 additional replica(s)
    S: 99720241248ff0e4c6fa65c2385e92468b3b5993 10.0.0.18:6379
       slots: (0 slots) slave
       replicates d04e524daec4d8e22bdada7f21a9487c2d3e1057
    M: d34da8666a6f587283a1c2fca5d13691407f9462 10.0.0.28:6379
       slots:[10923-16383] (5461 slots) master
       1 additional replica(s)
    [OK] All nodes agree about slots configuration.
    >>> Check for open slots...
    >>> Check slots coverage...
    [OK] All 16384 slots covered.
    
    [root@redis-node1 ~]#cat /var/lib/redis/nodes-6379.conf 
    d6e2eca6b338b717923f64866bd31d42e52edc98 10.0.0.68:6379@16379 master - 0
    1582356107260 8 connected
    9875b50925b4e4f29598e6072e5937f90df9fc71 10.0.0.58:6379@16379 slave 
    d34da8666a6f587283a1c2fca5d13691407f9462 0 1582356110286 6 connected
    f9adcfb8f5a037b257af35fa548a26ffbadc852d 10.0.0.38:6379@16379 slave 
    cb028b83f9dc463d732f6e76ca6bbcd469d948a7 0 1582356108268 4 connected
    d04e524daec4d8e22bdada7f21a9487c2d3e1057 10.0.0.48:6379@16379 master - 0
    1582356105000 7 connected 5461-10922
    99720241248ff0e4c6fa65c2385e92468b3b5993 10.0.0.18:6379@16379 slave 
    d04e524daec4d8e22bdada7f21a9487c2d3e1057 0 1582356108000 7 connected
    d34da8666a6f587283a1c2fca5d13691407f9462 10.0.0.28:6379@16379 master - 0
    1582356107000 3 connected 10923-16383
    cb028b83f9dc463d732f6e76ca6bbcd469d948a7 10.0.0.8:6379@16379 myself,master - 0
    1582356106000 1 connected 0-5460
    vars currentEpoch 8 lastVoteEpoch 7 #和上面显示结果一样
    
    [root@redis-node1 ~]#redis-cli -a 123456 CLUSTER NODES
    Warning: Using a password with '-a' or '-u' option on the command line interface 
    may not be safe.
    d6e2eca6b338b717923f64866bd31d42e52edc98 10.0.0.68:6379@16379 master - 0
    1582356313200 8 connected
    9875b50925b4e4f29598e6072e5937f90df9fc71 10.0.0.58:6379@16379 slave 
    d34da8666a6f587283a1c2fca5d13691407f9462 0 1582356311000 6 connected
    f9adcfb8f5a037b257af35fa548a26ffbadc852d 10.0.0.38:6379@16379 slave 
    cb028b83f9dc463d732f6e76ca6bbcd469d948a7 0 1582356314208 4 connected
    d04e524daec4d8e22bdada7f21a9487c2d3e1057 10.0.0.48:6379@16379 master - 0
    1582356311182 7 connected 5461-10922
    99720241248ff0e4c6fa65c2385e92468b3b5993 10.0.0.18:6379@16379 slave 
    d04e524daec4d8e22bdada7f21a9487c2d3e1057 0 1582356312000 7 connected
    d34da8666a6f587283a1c2fca5d13691407f9462 10.0.0.28:6379@16379 master - 0
    1582356312190 3 connected 10923-16383
    cb028b83f9dc463d732f6e76ca6bbcd469d948a7 10.0.0.8:6379@16379 myself,master - 0
    1582356310000 1 connected 0-5460
    
    #查看集群状态
    [root@redis-node1 ~]#redis-cli -a 123456 CLUSTER INFO
    Warning: Using a password with '-a' or '-u' option on the command line interface 
    may not be safe.
    cluster_state:ok
    cluster_slots_assigned:16384
    cluster_slots_ok:16384
    cluster_slots_pfail:0
    cluster_slots_fail:0
    cluster_known_nodes:7
    cluster_size:3
    cluster_current_epoch:8
    cluster_my_epoch:1
    cluster_stats_messages_ping_sent:17442
    cluster_stats_messages_pong_sent:13318
    cluster_stats_messages_fail_sent:4
    cluster_stats_messages_auth-ack_sent:1
    cluster_stats_messages_sent:30765
    cluster_stats_messages_ping_received:13311
    cluster_stats_messages_pong_received:13367
    cluster_stats_messages_meet_received:7
    cluster_stats_messages_fail_received:1
    cluster_stats_messages_auth-req_received:1
    cluster_stats_messages_received:26687
    
    1.3、#在新的master上重新分配槽位
    #新的node节点加到集群之后,默认是master节点,但是没有slots,需要重新分配
    #添加主机之后需要对添加至集群中的新主机重新分片,否则其没有分片也就无法写入数据。
    #注意: 重新分配槽位需要清空数据,所以需要先备份数据,扩展后再恢复数据
    [root@redis-node1 ~]#redis-cli -a 123456 --cluster reshard <当前任意集群节点>:6379
    Warning: Using a password with '-a' or '-u' option on the command line interface 
    may not be safe.
    >>> Performing Cluster Check (using node 10.0.0.68:6379)
    M: d6e2eca6b338b717923f64866bd31d42e52edc98 10.0.0.68:6379
       slots: (0 slots) master
    M: d34da8666a6f587283a1c2fca5d13691407f9462 10.0.0.28:6379
       slots:[10923-16383] (5461 slots) master
       1 additional replica(s)
    M: d04e524daec4d8e22bdada7f21a9487c2d3e1057 10.0.0.48:6379
       slots:[5461-10922] (5462 slots) master
       1 additional replica(s)
    M: cb028b83f9dc463d732f6e76ca6bbcd469d948a7 10.0.0.8:6379
       slots:[0-5460] (5461 slots) master
       1 additional replica(s)
    S: 99720241248ff0e4c6fa65c2385e92468b3b5993 10.0.0.18:6379
       slots: (0 slots) slave
       replicates d04e524daec4d8e22bdada7f21a9487c2d3e1057
    M: f67f1c02c742cd48d3f48d8c362f9f1b9aa31549 10.0.0.78:6379
       slots: (0 slots) master
    S: f9adcfb8f5a037b257af35fa548a26ffbadc852d 10.0.0.38:6379
       slots: (0 slots) slave
       replicates cb028b83f9dc463d732f6e76ca6bbcd469d948a7
    S: 9875b50925b4e4f29598e6072e5937f90df9fc71 10.0.0.58:6379
       slots: (0 slots) slave
       replicates d34da8666a6f587283a1c2fca5d13691407f9462
    [OK] All nodes agree about slots configuration.
    >>> Check for open slots...
    >>> Check slots coverage...
    [OK] All 16384 slots covered.
    How many slots do you want to move (from 1 to 16384)?4096 #新分配多少个槽位=16384/master个数
    What is the receiving node ID? d6e2eca6b338b717923f64866bd31d42e52edc98 #新的master的ID
    Please enter all the source node IDs.
     Type 'all' to use all the nodes as source nodes for the hash slots.
     Type 'done' once you entered all the source nodes IDs.
    Source node #1: all #将哪些源主机的槽位分配给新的节点,all是自动在所有的redis node选择划分,如果是从redis cluster删除某个主机可以使用此方式将指定主机上的槽位全部移动到别的redis主机
    ......
    Do you want to proceed with the proposed reshard plan (yes/no)?  yes #确认分配
    ......
    Moving slot 12280 from 10.0.0.28:6379 to 10.0.0.68:6379: .
    Moving slot 12281 from 10.0.0.28:6379 to 10.0.0.68:6379: .
    Moving slot 12282 from 10.0.0.28:6379 to 10.0.0.68:6379: 
    Moving slot 12283 from 10.0.0.28:6379 to 10.0.0.68:6379: ..
    Moving slot 12284 from 10.0.0.28:6379 to 10.0.0.68:6379: 
    Moving slot 12285 from 10.0.0.28:6379 to 10.0.0.68:6379: .
    Moving slot 12286 from 10.0.0.28:6379 to 10.0.0.68:6379: 
    Moving slot 12287 from 10.0.0.28:6379 to 10.0.0.68:6379: ..
    [root@redis-node1 ~]# #确定slot分配成功
    [root@redis-node1 ~]#redis-cli -a 123456 --cluster check 10.0.0.8:6379
    Warning: Using a password with '-a' or '-u' option on the command line interface 
    may not be safe.
    10.0.0.8:6379 (cb028b83...) -> 5019 keys | 4096 slots | 1 slaves.
    10.0.0.68:6379 (d6e2eca6...) -> 4948 keys | 4096 slots | 0 slaves.
    10.0.0.48:6379 (d04e524d...) -> 5033 keys | 4096 slots | 1 slaves.
    10.0.0.28:6379 (d34da866...) -> 5000 keys | 4096 slots | 1 slaves.
    [OK] 20000 keys in 5 masters.
    1.22 keys per slot on average.
    >>> Performing Cluster Check (using node 10.0.0.8:6379)
    M: cb028b83f9dc463d732f6e76ca6bbcd469d948a7 10.0.0.8:6379
       slots:[1365-5460] (4096 slots) master
       1 additional replica(s)
    M: d6e2eca6b338b717923f64866bd31d42e52edc98 10.0.0.68:6379
       slots:[0-1364],[5461-6826],[10923-12287] (4096 slots) master #可看到4096个slots
    S: 9875b50925b4e4f29598e6072e5937f90df9fc71 10.0.0.58:6379
       slots: (0 slots) slave
       replicates d34da8666a6f587283a1c2fca5d13691407f9462
    S: f9adcfb8f5a037b257af35fa548a26ffbadc852d 10.0.0.38:6379
       slots: (0 slots) slave
       replicates cb028b83f9dc463d732f6e76ca6bbcd469d948a7
    M: d04e524daec4d8e22bdada7f21a9487c2d3e1057 10.0.0.48:6379
       slots:[6827-10922] (4096 slots) master
       1 additional replica(s)
    S: 99720241248ff0e4c6fa65c2385e92468b3b5993 10.0.0.18:6379
       slots: (0 slots) slave
       replicates d04e524daec4d8e22bdada7f21a9487c2d3e1057
    M: d34da8666a6f587283a1c2fca5d13691407f9462 10.0.0.28:6379
       slots:[12288-16383] (4096 slots) master
       1 additional replica(s)
    [OK] All nodes agree about slots configuration.
    >>> Check for open slots...
    >>> Check slots coverage...
    [OK] All 16384 slots covered.
    
    1.4、#为新的master添加新的slave节点
    #需要再向当前的Redis集群中添加一个Redis单机服务器10.0.0.78,用于解决当前10.0.0.68单机的潜在宕机问题,即实现相应的高可用功能。有两种方式:
    
    #方法1:在新加节点到集群时,直接将之设置为slave
    #查看当前状态
    [root@redis-node1 ~]#redis-cli -a 123456 --cluster check 10.0.0.8:6379
    Warning: Using a password with '-a' or '-u' option on the command line interface 
    may not be safe.
    10.0.0.8:6379 (cb028b83...) -> 5019 keys | 4096 slots | 1 slaves.
    10.0.0.68:6379 (d6e2eca6...) -> 4948 keys | 4096 slots | 0 slaves.
    10.0.0.48:6379 (d04e524d...) -> 5033 keys | 4096 slots | 1 slaves.
    10.0.0.28:6379 (d34da866...) -> 5000 keys | 4096 slots | 1 slaves.
    [OK] 20000 keys in 4 masters.
    1.22 keys per slot on average.
    >>> Performing Cluster Check (using node 10.0.0.8:6379)
    M: cb028b83f9dc463d732f6e76ca6bbcd469d948a7 10.0.0.8:6379
       slots:[1365-5460] (4096 slots) master
       1 additional replica(s)
    M: d6e2eca6b338b717923f64866bd31d42e52edc98 10.0.0.68:6379
       slots:[0-1364],[5461-6826],[10923-12287] (4096 slots) master
    S: 9875b50925b4e4f29598e6072e5937f90df9fc71 10.0.0.58:6379
       slots: (0 slots) slave
       replicates d34da8666a6f587283a1c2fca5d13691407f9462
    S: f9adcfb8f5a037b257af35fa548a26ffbadc852d 10.0.0.38:6379
       slots: (0 slots) slave
       replicates cb028b83f9dc463d732f6e76ca6bbcd469d948a7
    M: d04e524daec4d8e22bdada7f21a9487c2d3e1057 10.0.0.48:6379
       slots:[6827-10922] (4096 slots) master
       1 additional replica(s)
    S: 99720241248ff0e4c6fa65c2385e92468b3b5993 10.0.0.18:6379
       slots: (0 slots) slave
       replicates d04e524daec4d8e22bdada7f21a9487c2d3e1057
    M: d34da8666a6f587283a1c2fca5d13691407f9462 10.0.0.28:6379
       slots:[12288-16383] (4096 slots) master
       1 additional replica(s)
    [OK] All nodes agree about slots configuration.
    >>> Check for open slots...
    >>> Check slots coverage...
    [OK] All 16384 slots covered.
    #直接加为slave节点
    [root@redis-node1 ~]#redis-cli -a 123456 --cluster add-node 10.0.0.78:6379 10.0.0.8:6379 --cluster-slave --cluster-master-id d6e2eca6b338b717923f64866bd31d42e52edc98
    
    #验证是否成功
    [root@redis-node1 ~]#redis-cli -a 123456 --cluster check 10.0.0.8:6379
    Warning: Using a password with '-a' or '-u' option on the command line interface 
    may not be safe.
    10.0.0.8:6379 (cb028b83...) -> 5019 keys | 4096 slots | 1 slaves.
    10.0.0.68:6379 (d6e2eca6...) -> 4948 keys | 4096 slots | 1 slaves.
    10.0.0.48:6379 (d04e524d...) -> 5033 keys | 4096 slots | 1 slaves.
    10.0.0.28:6379 (d34da866...) -> 5000 keys | 4096 slots | 1 slaves.
    [OK] 20000 keys in 4 masters.
    1.22 keys per slot on average.
    >>> Performing Cluster Check (using node 10.0.0.8:6379)
    M: cb028b83f9dc463d732f6e76ca6bbcd469d948a7 10.0.0.8:6379
       slots:[1365-5460] (4096 slots) master
       1 additional replica(s)
    M: d6e2eca6b338b717923f64866bd31d42e52edc98 10.0.0.68:6379
       slots:[0-1364],[5461-6826],[10923-12287] (4096 slots) master
       1 additional replica(s)
    S: 36840d7eea5835ba540d9b64ec018aa3f8de6747 10.0.0.78:6379
       slots: (0 slots) slave
       replicates d6e2eca6b338b717923f64866bd31d42e52edc98
    S: 9875b50925b4e4f29598e6072e5937f90df9fc71 10.0.0.58:6379
       slots: (0 slots) slave
     replicates d34da8666a6f587283a1c2fca5d13691407f9462
    S: f9adcfb8f5a037b257af35fa548a26ffbadc852d 10.0.0.38:6379
       slots: (0 slots) slave
       replicates cb028b83f9dc463d732f6e76ca6bbcd469d948a7
    M: d04e524daec4d8e22bdada7f21a9487c2d3e1057 10.0.0.48:6379
       slots:[6827-10922] (4096 slots) master
       1 additional replica(s)
    S: 99720241248ff0e4c6fa65c2385e92468b3b5993 10.0.0.18:6379
       slots: (0 slots) slave
       replicates d04e524daec4d8e22bdada7f21a9487c2d3e1057
    M: d34da8666a6f587283a1c2fca5d13691407f9462 10.0.0.28:6379
       slots:[12288-16383] (4096 slots) master
       1 additional replica(s)
    [OK] All nodes agree about slots configuration.
    >>> Check for open slots...
    >>> Check slots coverage...
    [OK] All 16384 slots covered.
    [root@centos8 ~]#redis-cli -a 123456 -h 10.0.0.8 --no-auth-warning cluster info 
    cluster_state:ok
    cluster_slots_assigned:16384
    cluster_slots_ok:16384
    cluster_slots_pfail:0
    cluster_slots_fail:0
    cluster_known_nodes:8   #8个节点
    cluster_size:4          #4组主从
    cluster_current_epoch:11
    cluster_my_epoch:10
    cluster_stats_messages_ping_sent:1810
    cluster_stats_messages_pong_sent:1423
    cluster_stats_messages_auth-req_sent:5
    cluster_stats_messages_update_sent:14
    cluster_stats_messages_sent:3252
    cluster_stats_messages_ping_received:1417
    cluster_stats_messages_pong_received:1368
    cluster_stats_messages_meet_received:2
    cluster_stats_messages_fail_received:2
    cluster_stats_messages_auth-ack_received:2
    cluster_stats_messages_update_received:4
    cluster_stats_messages_received:2795
    
    #方法2:先将新节点加入集群,再修改为slave
    
    #为新的master添加slave节点
    #把10.0.0.78:6379添加到集群中:
    [root@redis-node1 ~]#redis-cli -a 123456 --cluster add-node 10.0.0.78:6379 10.0.0.8:6379
    
    #更改新节点更改状态为slave:
    #需要手动将其指定为某个master的slave,否则其默认角色为master。
    [root@redis-node1 ~]#redis-cli -h 10.0.0.78 -p 6379 -a 123456 #登录到新添加节点
    10.0.0.78:6379> CLUSTER NODES #查看当前集群节点,找到目标master 的ID
    10.0.0.78:6379> CLUSTER REPLICATE 886338acd50c3015be68a760502b239f4509881c #将其设置slave,命令格式为cluster replicate MASTERID
    10.0.0.78:6379> CLUSTER NODES #再次查看集群节点状态,验证节点是否已经更改为指定master 的slave
    
    2、Redis Cluster缩容(Redis 5为例)
    	由于10.0.0.8服务器使用年限已经超过三年,已经超过厂商质保期而且硬盘出现异常报警,经运维部架构师提交方案并同开发同事开会商议,决定将现有Redis集群的8台服务器中的master 10.0.0.8和对应的slave 10.0.0.38 临时下线,剩余三组主从服务器的并发写入性能足够支撑未来1-2年的业务需求。
    	删除节点过程:
    	添加节点的时候是先添加node节点到集群,然后分配槽位,删除节点的操作与添加节点的操作正好相反,是先将被删除的Redis node上的槽位迁移到集群中的其他Redis node节点上,然后再将其删除,如果一个Redis node节点上的槽位没有被完全迁移,删除该node的时候会提示有数据且无法删除。
    
    2.1、迁移master 的槽位至其他master
    注意: 被迁移Redis master源服务器必须保证没有数据,否则迁移报错并会被强制中断。
    #查看当前状态
    [root@redis-node1 ~]#redis-cli -a 123456 --cluster check 10.0.0.8:6379
    Warning: Using a password with '-a' or '-u' option on the command line interface 
    may not be safe.
    10.0.0.8:6379 (cb028b83...) -> 5019 keys | 4096 slots | 1 slaves.
    10.0.0.68:6379 (d6e2eca6...) -> 4948 keys | 4096 slots | 1 slaves.
    10.0.0.48:6379 (d04e524d...) -> 5033 keys | 4096 slots | 1 slaves.
    10.0.0.28:6379 (d34da866...) -> 5000 keys | 4096 slots | 1 slaves.
    [OK] 20000 keys in 4 masters.
    1.22 keys per slot on average.
    >>> Performing Cluster Check (using node 10.0.0.8:6379)
    M: cb028b83f9dc463d732f6e76ca6bbcd469d948a7 10.0.0.8:6379
       slots:[1365-5460] (4096 slots) master
       1 additional replica(s)
    M: d6e2eca6b338b717923f64866bd31d42e52edc98 10.0.0.68:6379
       slots:[0-1364],[5461-6826],[10923-12287] (4096 slots) master
       1 additional replica(s)
    S: 36840d7eea5835ba540d9b64ec018aa3f8de6747 10.0.0.78:6379
       slots: (0 slots) slave
       replicates d6e2eca6b338b717923f64866bd31d42e52edc98
    S: 9875b50925b4e4f29598e6072e5937f90df9fc71 10.0.0.58:6379
       slots: (0 slots) slave
      replicates d34da8666a6f587283a1c2fca5d13691407f9462
    S: f9adcfb8f5a037b257af35fa548a26ffbadc852d 10.0.0.38:6379
       slots: (0 slots) slave
       replicates cb028b83f9dc463d732f6e76ca6bbcd469d948a7
    M: d04e524daec4d8e22bdada7f21a9487c2d3e1057 10.0.0.48:6379
       slots:[6827-10922] (4096 slots) master
       1 additional replica(s)
    S: 99720241248ff0e4c6fa65c2385e92468b3b5993 10.0.0.18:6379
       slots: (0 slots) slave
       replicates d04e524daec4d8e22bdada7f21a9487c2d3e1057
    M: d34da8666a6f587283a1c2fca5d13691407f9462 10.0.0.28:6379
       slots:[12288-16383] (4096 slots) master
       1 additional replica(s)
    [OK] All nodes agree about slots configuration.
    >>> Check for open slots...
    >>> Check slots coverage...
    [OK] All 16384 slots covered.
    #连接到任意集群节点,先将1356个slot从10.0.0.8移动到第一个master节点10.0.0.28上
    [root@redis-node1 ~]#redis-cli -a 123456 --cluster reshard 10.0.0.18:6379
    Warning: Using a password with '-a' or '-u' option on the command line interface 
    may not be safe.
    >>> Performing Cluster Check (using node 10.0.0.18:6379)
    S: 99720241248ff0e4c6fa65c2385e92468b3b5993 10.0.0.18:6379
       slots: (0 slots) slave
       replicates d04e524daec4d8e22bdada7f21a9487c2d3e1057
    S: f9adcfb8f5a037b257af35fa548a26ffbadc852d 10.0.0.38:6379
       slots: (0 slots) slave
       replicates cb028b83f9dc463d732f6e76ca6bbcd469d948a7
    S: 36840d7eea5835ba540d9b64ec018aa3f8de6747 10.0.0.78:6379
       slots: (0 slots) slave
       replicates d6e2eca6b338b717923f64866bd31d42e52edc98
    M: cb028b83f9dc463d732f6e76ca6bbcd469d948a7 10.0.0.8:6379
       slots:[1365-5460] (4096 slots) master
       1 additional replica(s)
    M: d6e2eca6b338b717923f64866bd31d42e52edc98 10.0.0.68:6379
       slots:[0-1364],[5461-6826],[10923-12287] (4096 slots) master
       1 additional replica(s)
    S: 9875b50925b4e4f29598e6072e5937f90df9fc71 10.0.0.58:6379
       slots: (0 slots) slave
       replicates d34da8666a6f587283a1c2fca5d13691407f9462
    M: d04e524daec4d8e22bdada7f21a9487c2d3e1057 10.0.0.48:6379
       slots:[6827-10922] (4096 slots) master
       1 additional replica(s)
    M: d34da8666a6f587283a1c2fca5d13691407f9462 10.0.0.28:6379
       slots:[12288-16383] (4096 slots) master
       1 additional replica(s)
    [OK] All nodes agree about slots configuration.
    >>> Check for open slots...
    >>> Check slots coverage...
    [OK] All 16384 slots covered.
    How many slots do you want to move (from 1 to 16384)? 1356 #共4096/3分别给其它三个
    master节点
    What is the receiving node ID? d34da8666a6f587283a1c2fca5d13691407f9462 #master 
    10.0.0.28
    Please enter all the source node IDs.
     Type 'all' to use all the nodes as source nodes for the hash slots.
     Type 'done' once you entered all the source nodes IDs.
     Source node #1: cb028b83f9dc463d732f6e76ca6bbcd469d948a7 #输入要删除10.0.0.8节点ID
    Source node #2: done
    Ready to move 1356 slots.
     Source nodes:
       M: cb028b83f9dc463d732f6e76ca6bbcd469d948a7 10.0.0.8:6379
           slots:[1365-5460] (4096 slots) master
           1 additional replica(s)
     Destination node:
       M: d34da8666a6f587283a1c2fca5d13691407f9462 10.0.0.28:6379
           slots:[12288-16383] (4096 slots) master
           1 additional replica(s)
     Resharding plan:
       Moving slot 1365 from cb028b83f9dc463d732f6e76ca6bbcd469d948a7
    ......
     Moving slot 2719 from cb028b83f9dc463d732f6e76ca6bbcd469d948a7
       Moving slot 2720 from cb028b83f9dc463d732f6e76ca6bbcd469d948a7
    Do you want to proceed with the proposed reshard plan (yes/no)? yes #确定
    ......
    Moving slot 2718 from 10.0.0.8:6379 to 10.0.0.28:6379: ..
    Moving slot 2719 from 10.0.0.8:6379 to 10.0.0.28:6379: .
    Moving slot 2720 from 10.0.0.8:6379 to 10.0.0.28:6379: ..
    #非交互式方式
    #再将1365个slot从10.0.0.8移动到第二个master节点10.0.0.48上
    [root@redis-node1 ~]#redis-cli -a 123456 --cluster reshard 10.0.0.18:6379 --
    cluster-slots 1365 --cluster-from cb028b83f9dc463d732f6e76ca6bbcd469d948a7 --
    cluster-to d04e524daec4d8e22bdada7f21a9487c2d3e1057 --cluster-yes
    #最后的slot从10.0.0.8移动到第三个master节点10.0.0.68上
    [root@redis-node1 ~]#redis-cli -a 123456 --cluster reshard 10.0.0.18:6379 --
    cluster-slots 1375 --cluster-from cb028b83f9dc463d732f6e76ca6bbcd469d948a7 --
    cluster-to d6e2eca6b338b717923f64866bd31d42e52edc98 --cluster-yes
    #确认10.0.0.8的所有slot都移走了,上面的slave也自动删除,成为其它master的slave 
    [root@redis-node1 ~]#redis-cli -a 123456 --cluster check 10.0.0.8:6379
    Warning: Using a password with '-a' or '-u' option on the command line interface 
    may not be safe.
    10.0.0.8:6379 (cb028b83...) -> 0 keys | 0 slots | 0 slaves.
    10.0.0.68:6379 (d6e2eca6...) -> 6631 keys | 5471 slots | 2 slaves.
    10.0.0.48:6379 (d04e524d...) -> 6694 keys | 5461 slots | 1 slaves.
    10.0.0.28:6379 (d34da866...) -> 6675 keys | 5452 slots | 1 slaves.
    [OK] 20000 keys in 4 masters.
    1.22 keys per slot on average.
    >>> Performing Cluster Check (using node 10.0.0.8:6379)
    M: cb028b83f9dc463d732f6e76ca6bbcd469d948a7 10.0.0.8:6379
       slots: (0 slots) master
    M: d6e2eca6b338b717923f64866bd31d42e52edc98 10.0.0.68:6379
       slots:[0-1364],[4086-6826],[10923-12287] (5471 slots) master
       2 additional replica(s)
    S: 36840d7eea5835ba540d9b64ec018aa3f8de6747 10.0.0.78:6379
       slots: (0 slots) slave
       replicates d6e2eca6b338b717923f64866bd31d42e52edc98
    S: 9875b50925b4e4f29598e6072e5937f90df9fc71 10.0.0.58:6379
       slots: (0 slots) slave
       replicates d34da8666a6f587283a1c2fca5d13691407f9462
    S: f9adcfb8f5a037b257af35fa548a26ffbadc852d 10.0.0.38:6379
       slots: (0 slots) slave
        replicates d6e2eca6b338b717923f64866bd31d42e52edc98
    M: d04e524daec4d8e22bdada7f21a9487c2d3e1057 10.0.0.48:6379
       slots:[2721-4085],[6827-10922] (5461 slots) master
       1 additional replica(s)
    S: 99720241248ff0e4c6fa65c2385e92468b3b5993 10.0.0.18:6379
       slots: (0 slots) slave
       replicates d04e524daec4d8e22bdada7f21a9487c2d3e1057
    M: d34da8666a6f587283a1c2fca5d13691407f9462 10.0.0.28:6379
       slots:[1365-2720],[12288-16383] (5452 slots) master
       1 additional replica(s)
    [OK] All nodes agree about slots configuration.
    >>> Check for open slots...
    >>> Check slots coverage...
    [OK] All 16384 slots covered.
    #原有的10.0.0.38自动成为10.0.0.68的slave
    [root@redis-node1 ~]#redis-cli -a 123456 -h 10.0.0.68 INFO replication
    Warning: Using a password with '-a' or '-u' option on the command line interface 
    may not be safe.
    # Replication
    role:master
    connected_slaves:2
    slave0:ip=10.0.0.78,port=6379,state=online,offset=129390,lag=0
    slave1:ip=10.0.0.38,port=6379,state=online,offset=129390,lag=0
    master_replid:43e3e107a0acb1fd5a97240fc4b2bd8fc85b113f
    master_replid2:0000000000000000000000000000000000000000
    master_repl_offset:129404
    second_repl_offset:-1
    repl_backlog_active:1
    repl_backlog_size:1048576
    repl_backlog_first_byte_offset:1
    repl_backlog_histlen:129404
    [root@centos8 ~]#redis-cli -a 123456 -h 10.0.0.8 --no-auth-warning cluster info 
    cluster_state:ok
    cluster_slots_assigned:16384
    cluster_slots_ok:16384
    cluster_slots_pfail:0
    cluster_slots_fail:0
    cluster_known_nodes:8  #集群中8个节点
    cluster_size:3       #少了一个主从的slot
    cluster_current_epoch:16
    cluster_my_epoch:13
    cluster_stats_messages_ping_sent:3165
    cluster_stats_messages_pong_sent:2489
    cluster_stats_messages_fail_sent:6
    cluster_stats_messages_auth-req_sent:5
    cluster_stats_messages_auth-ack_sent:1
    cluster_stats_messages_update_sent:27
    cluster_stats_messages_sent:5693
    cluster_stats_messages_ping_received:2483
    cluster_stats_messages_pong_received:2400
    cluster_stats_messages_meet_received:2
    cluster_stats_messages_fail_received:2
    cluster_stats_messages_auth-req_received:1
    cluster_stats_messages_auth-ack_received:2
    cluster_stats_messages_update_received:4
    cluster_stats_messages_received:4894
    
    2.2、从集群删除服务器
    虽然槽位已经迁移完成,但是服务器IP信息还在集群当中,因此还需要将IP信息从集群删除
    注意: 删除服务器前,必须清除主机上面的槽位,否则会删除主机失败
    [root@redis-node1 ~]#redis-cli -a 123456 --cluster del-node 10.0.0.8:6379 
    cb028b83f9dc463d732f6e76ca6bbcd469d948a7
    Warning: Using a password with '-a' or '-u' option on the command line interface 
    may not be safe.
    >>> Removing node cb028b83f9dc463d732f6e76ca6bbcd469d948a7 from cluster 
    10.0.0.8:6379
    >>> Sending CLUSTER FORGET messages to the cluster...
    >>> SHUTDOWN the node. #删除节点后,redis进程自动关闭
    #删除节点信息
    [root@redis-node1 ~]#rm -f /var/lib/redis/nodes-6379.conf
    
    2.3、删除多余的slave节点验证结果
    #验证删除成功
    [root@redis-node1 ~]#ss -ntl
    State       Recv-Q       Send-Q   Local Address:Port     Peer Address:Port   
         
    LISTEN       0             128            0.0.0.0:22             0.0.0.0:*       
        
    LISTEN       0             128               [::]:22               [::]:*  
    [root@redis-node1 ~]#redis-cli -a 123456 --cluster check 10.0.0.18:6379 
    Warning: Using a password with '-a' or '-u' option on the command line interface 
    may not be safe.
    10.0.0.68:6379 (d6e2eca6...) -> 6631 keys | 5471 slots | 2 slaves.
    10.0.0.48:6379 (d04e524d...) -> 6694 keys | 5461 slots | 1 slaves.
    10.0.0.28:6379 (d34da866...) -> 6675 keys | 5452 slots | 1 slaves.
    [OK] 20000 keys in 3 masters.
    1.22 keys per slot on average.
    >>> Performing Cluster Check (using node 10.0.0.18:6379)
    S: 99720241248ff0e4c6fa65c2385e92468b3b5993 10.0.0.18:6379
       slots: (0 slots) slave
       replicates d04e524daec4d8e22bdada7f21a9487c2d3e1057
    S: f9adcfb8f5a037b257af35fa548a26ffbadc852d 10.0.0.38:6379
       slots: (0 slots) slave
       replicates d6e2eca6b338b717923f64866bd31d42e52edc98
    S: 36840d7eea5835ba540d9b64ec018aa3f8de6747 10.0.0.78:6379
     slots: (0 slots) slave
       replicates d6e2eca6b338b717923f64866bd31d42e52edc98
    M: d6e2eca6b338b717923f64866bd31d42e52edc98 10.0.0.68:6379
       slots:[0-1364],[4086-6826],[10923-12287] (5471 slots) master
       2 additional replica(s)
    S: 9875b50925b4e4f29598e6072e5937f90df9fc71 10.0.0.58:6379
       slots: (0 slots) slave
       replicates d34da8666a6f587283a1c2fca5d13691407f9462
    M: d04e524daec4d8e22bdada7f21a9487c2d3e1057 10.0.0.48:6379
       slots:[2721-4085],[6827-10922] (5461 slots) master
       1 additional replica(s)
    M: d34da8666a6f587283a1c2fca5d13691407f9462 10.0.0.28:6379
       slots:[1365-2720],[12288-16383] (5452 slots) master
       1 additional replica(s)
    [OK] All nodes agree about slots configuration.
    >>> Check for open slots...
    >>> Check slots coverage...
    [OK] All 16384 slots covered.
    #删除多余的slave从节点
    [root@redis-node1 ~]#redis-cli -a 123456 --cluster del-node 10.0.0.18:6379 
    f9adcfb8f5a037b257af35fa548a26ffbadc852d
    Warning: Using a password with '-a' or '-u' option on the command line interface 
    may not be safe.
    >>> Removing node f9adcfb8f5a037b257af35fa548a26ffbadc852d from cluster 
    10.0.0.18:6379
    >>> Sending CLUSTER FORGET messages to the cluster...
    >>> SHUTDOWN the node. #删除集群文件
    [root@redis-node4 ~]#rm -f /var/lib/redis/nodes-6379.conf 
    [root@redis-node1 ~]#redis-cli -a 123456 --cluster check 10.0.0.18:6379 
    Warning: Using a password with '-a' or '-u' option on the command line interface 
    may not be safe.
    10.0.0.68:6379 (d6e2eca6...) -> 6631 keys | 5471 slots | 1 slaves.
    10.0.0.48:6379 (d04e524d...) -> 6694 keys | 5461 slots | 1 slaves.
    10.0.0.28:6379 (d34da866...) -> 6675 keys | 5452 slots | 1 slaves.
    [OK] 20000 keys in 3 masters.
    1.22 keys per slot on average.
    >>> Performing Cluster Check (using node 10.0.0.18:6379)
    S: 99720241248ff0e4c6fa65c2385e92468b3b5993 10.0.0.18:6379
       slots: (0 slots) slave
       replicates d04e524daec4d8e22bdada7f21a9487c2d3e1057
    S: 36840d7eea5835ba540d9b64ec018aa3f8de6747 10.0.0.78:6379
       slots: (0 slots) slave
       replicates d6e2eca6b338b717923f64866bd31d42e52edc98
    M: d6e2eca6b338b717923f64866bd31d42e52edc98 10.0.0.68:6379
       slots:[0-1364],[4086-6826],[10923-12287] (5471 slots) master
       1 additional replica(s)
    S: 9875b50925b4e4f29598e6072e5937f90df9fc71 10.0.0.58:6379
       slots: (0 slots) slave
       replicates d34da8666a6f587283a1c2fca5d13691407f9462
    M: d04e524daec4d8e22bdada7f21a9487c2d3e1057 10.0.0.48:6379
       slots:[2721-4085],[6827-10922] (5461 slots) master
       1 additional replica(s)
    M: d34da8666a6f587283a1c2fca5d13691407f9462 10.0.0.28:6379
     slots:[1365-2720],[12288-16383] (5452 slots) master
       1 additional replica(s)
    [OK] All nodes agree about slots configuration.
    >>> Check for open slots...
    >>> Check slots coverage...
    [OK] All 16384 slots covered.
    [root@redis-node1 ~]#redis-cli -a 123456 --cluster info 10.0.0.18:6379 
    Warning: Using a password with '-a' or '-u' option on the command line interface 
    may not be safe.
    10.0.0.68:6379 (d6e2eca6...) -> 6631 keys | 5471 slots | 1 slaves.
    10.0.0.48:6379 (d04e524d...) -> 6694 keys | 5461 slots | 1 slaves.
    10.0.0.28:6379 (d34da866...) -> 6675 keys | 5452 slots | 1 slaves.
    [OK] 20000 keys in 3 masters.
    1.22 keys per slot on average.
    #查看集群信息
    [root@redis-node1 ~]#redis-cli -a 123456 -h 10.0.0.18 CLUSTER INFO
    Warning: Using a password with '-a' or '-u' option on the command line interface 
    may not be safe.
    cluster_state:ok
    cluster_slots_assigned:16384
    cluster_slots_ok:16384
    cluster_slots_pfail:0
    cluster_slots_fail:0
    cluster_known_nodes:6  #只有6个节点
    cluster_size:3
    cluster_current_epoch:11
    cluster_my_epoch:10
    cluster_stats_messages_ping_sent:12147
    cluster_stats_messages_pong_sent:12274
    cluster_stats_messages_update_sent:14
    cluster_stats_messages_sent:24435
    cluster_stats_messages_ping_received:12271
    cluster_stats_messages_pong_received:12147
    cluster_stats_messages_meet_received:3
    cluster_stats_messages_update_received:28
    cluster_stats_messages_received:24449
    

    四、LVS调度算法总结

    1.LVS 调度算法
    ipvs scheduler:根据其调度时是否考虑各RS当前的负载状态
    分为两种:静态方法和动态方法
    
    1.1静态方法
    仅根据算法本身进行调度
    1、RR:roundrobin,轮询,较常用
    2、WRR:Weighted RR,加权轮询,较常用
    3、SH:Source Hashing,实现session sticky,源IP地址hash;将来自于同一个IP地址的请求始终发往第一次挑中的RS,从而实现会话绑定
    4、DH:Destination Hashing;目标地址哈希,第一次轮询调度至RS,后续将发往同一个目标地址的请求始终转发至第一次挑中的RS,典型使用场景是正向代理缓存场景中的负载均衡,如: Web缓存
    
    1.2动态方法
    主要根据每台RS当前的负载状态及调度算法进行调度,Overhead=value 较小的RS将被调度
    1、LC:least connections 适用于长连接应用
    Overhead=activeconns*256+inactiveconns
    2、WLC:Weighted LC,默认调度方法,较常用
    Overhead=(activeconns*256+inactiveconns)/weight
    3、SED:Shortest Expected Delay,初始连接高权重优先,只检查活动连接,而不考虑非活动连接
    Overhead=(activeconns+1)*256/weight
    4、NQ:Never Queue,第一轮均匀分配,后续SED
    5、LBLC:Locality-Based LC,动态的DH算法,使用场景:根据负载状态实现正向代理,实现Web Cache等
    6、LBLCR:LBLC with Replication,带复制功能的LBLC,解决LBLC负载不均衡问题,从负载重的RS复制到负载轻的RS,实现Web Cache等
    
    1.3 内核 4.15 版本后新增调度算法:FO和OVF
    FO(Weighted Fail Over)调度算法,在此FO算法中,遍历虚拟服务所关联的真实服务器链表,找到还未过载(未设置IP_VS_DEST_F_OVERLOAD标志)的且权重最高的真实服务器,进行调度
    
    OVF(Overflow-connection)调度算法,基于真实服务器的活动连接数量和权重值实现。将新连接调度到权重值最高的真实服务器,直到其活动连接数量超过权重值,之后调度到下一个权重值最高的真实服务器,在此OVF算法中,遍历虚拟服务相关联的真实服务器链表,找到权重值最高的可用真实服务器。一个可用的真实服务器需要同时满足以下条件:
    未过载(未设置IP_VS_DEST_F_OVERLOAD标志)
    真实服务器当前的活动连接数量小于其权重值
    其权重值不为零
    

    五、LVS的NAT、DR模型实现

    1、LVS-NAT模式实现
    环境:
    
    共四台主机
    一台: internet client:192.168.10.6/24   GW:无 仅主机
    
    一台:lvs  
    eth1 仅主机 192.168.10.100/24
    eth0 NAT 10.0.0.8/24
    
    两台RS:
    RS1: 10.0.0.7/24 GW:10.0.0.8 NAT
    RS2: 10.0.0.17/24 GW:10.0.0.8 NAT
    
    #client网卡配置:
    [root@internet ~]#cat /etc/sysconfig/network-scripts/ifcfg-eth0 
    DEVICE=eth0
    NAME=eth0
    BOOTPROTO=static
    IPADDR=192.168.10.6
    PREFIX=24
    ONBOOT=yes
    
    #lvs网卡配置:
    [root@lvs network-scripts]#cat ifcfg-eth0
    DEVICE=eth0
    NAME=eth0
    BOOTPROTO=static
    IPADDR=10.0.0.8
    PREFIX=24
    ONBOOT=yes
    
    [root@lvs network-scripts]#cat ifcfg-eth1
    DEVICE=eth1
    NAME=eth1
    BOOTPROTO=static
    IPADDR=192.168.10.100
    PREFIX=24
    ONBOOT=yes
    
    #后端RS1网卡配置:
    [root@rs1 ~]#cat /etc/sysconfig/network-scripts/ifcfg-eth0
    DEVICE=eth0
    NAME=eth0
    BOOTPROTO=static
    IPADDR=10.0.0.7
    PREFIX=24
    GATEWAY=10.0.0.8
    ONBOOT=yes
    
    #后端RS2网卡配置
    [root@rs2 ~]#cat /etc/sysconfig/network-scripts/ifcfg-eth0 
    DEVICE=eth0
    NAME=eth0
    BOOTPROTO=static
    IPADDR=10.0.0.17
    PREFIX=24
    GATEWAY=10.0.0.8
    ONBOOT=yes
    
    #配置RS1的网站,实际生产环境应该配置RS1和RS2网页一样,这里实验方便观察所以配置不一样。
    [root@rs1 ~]#curl 10.0.0.7
    10.0.0.7 RS1
    
    #配置RS2的网站
    [root@rs2 ~]#curl 10.0.0.17
    10.0.0.17 RS2
    
    #修改内核参数,开启流量转发
    [root@lvs-server ~]#vim /etc/sysctl.conf
    net.ipv4.ip_forward = 1
    
    #使生效
    [root@lvs-server ~]#sysctl -p
    net.ipv4.ip_forward = 1
    
    #配置LVS集群及添加RS服务器
    [root@lvs-server ~]#ipvsadm -A -t 192.168.10.100:80 -s wrr 
    [root@lvs-server ~]#ipvsadm -a -t 192.168.10.100:80 -r 10.0.0.7:80 -m
    [root@lvs-server ~]#ipvsadm -a -t 192.168.10.100:80 -r 10.0.0.17:80 -m
    
    #查看lvs规则
    [root@lvs-server ~]#ipvsadm -Ln
    IP Virtual Server version 1.2.1 (size=4096)
    Prot LocalAddress:Port Scheduler Flags
      -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
    TCP  192.168.10.100:80 wrr
      -> 10.0.0.7:80                 Masq    1      1          0         
      -> 10.0.0.17:80                 Masq    1      0          0   
    [root@internet ~]#while :;do curl 192.168.10.100;sleep 0.5;done
    rs1.magedu.org
    rs2.magedu.org
    rs1.magedu.org
    rs2.magedu.org
    rs1.magedu.org
    rs2.magedu.org
    [root@lvs-server ~]#ipvsadm -Ln --stats
    IP Virtual Server version 1.2.1 (size=4096)
    Prot LocalAddress:Port               Conns   InPkts OutPkts InBytes OutBytes
      -> RemoteAddress:Port
    TCP  192.168.10.100:80                  67      405      255    32436    30092
      -> 10.0.0.7:80                        34      203      128    16244    15072
      -> 10.0.0.17:80                       33      202      127    16192    15020
    [root@lvs-server ~]#cat /proc/net/ip_vs
    IP Virtual Server version 1.2.1 (size=4096)
    Prot LocalAddress:Port Scheduler Flags
      -> RemoteAddress:Port Forward Weight ActiveConn InActConn
     TCP C0A80A64:0050 wrr  
      -> 0A000011:0050     Masq    1      0          98        
      -> 0A000007:0050     Masq    1      0          97  
      
    [root@lvs-server ~]#ipvsadm -Lnc
    IPVS connection entries
    pro expire state       source             virtual           destination
    TCP 01:55 TIME_WAIT   192.168.10.6:43486 192.168.10.100:80  10.0.0.17:80
    TCP 00:19 TIME_WAIT   192.168.10.6:43476 192.168.10.100:80  10.0.0.7:80
    TCP 01:58 TIME_WAIT   192.168.10.6:43500 192.168.10.100:80  10.0.0.7:80
    TCP 01:58 TIME_WAIT   192.168.10.6:43498 192.168.10.100:80  10.0.0.17:80
    TCP 01:59 TIME_WAIT   192.168.10.6:43502 192.168.10.100:80  10.0.0.17:80
    TCP 01:57 TIME_WAIT   192.168.10.6:43494 192.168.10.100:80  10.0.0.17:80
    TCP 01:57 TIME_WAIT   192.168.10.6:43496 192.168.10.100:80  10.0.0.7:80
    TCP 01:56 TIME_WAIT   192.168.10.6:43490 192.168.10.100:80  10.0.0.17:80
    TCP 00:20 TIME_WAIT   192.168.10.6:43480 192.168.10.100:80  10.0.0.7:80
    TCP 01:56 TIME_WAIT   192.168.10.6:43492 192.168.10.100:80  10.0.0.7:80
    TCP 01:55 TIME_WAIT   192.168.10.6:43488 192.168.10.100:80  10.0.0.7:80
    TCP 00:20 TIME_WAIT   192.168.10.6:43478 192.168.10.100:80  10.0.0.17:80
    TCP 01:59 TIME_WAIT   192.168.10.6:43504 192.168.10.100:80  10.0.0.7:80
    TCP 01:54 TIME_WAIT   192.168.10.6:43484 192.168.10.100:80  10.0.0.7:80
    TCP 01:54 TIME_WAIT   192.168.10.6:43482 192.168.10.100:80  10.0.0.17:80
    
    [root@lvs-server ~]#cat /proc/net/ip_vs_conn
    Pro FromIP   FPrt ToIP     TPrt DestIP   DPrt State       Expires PEName PEData
    TCP C0A80A06 A9DE C0A80A64 0050 0A000011 0050 TIME_WAIT        72
    TCP C0A80A06 A9EC C0A80A64 0050 0A000007 0050 TIME_WAIT        76
    TCP C0A80A06 AA64 C0A80A64 0050 0A000007 0050 TIME_WAIT       106
    TCP C0A80A06 AA0C C0A80A64 0050 0A000007 0050 TIME_WAIT        84
    TCP C0A80A06 AA3A C0A80A64 0050 0A000011 0050 TIME_WAIT        95
    TCP C0A80A06 AA86 C0A80A64 0050 0A000011 0050 TIME_WAIT       115
    TCP C0A80A06 AA78 C0A80A64 0050 0A000007 0050 TIME_WAIT       111
    TCP C0A80A06 AA06 C0A80A64 0050 0A000011 0050 TIME_WAIT        82
    TCP C0A80A06 AA44 C0A80A64 0050 0A000007 0050 TIME_WAIT        98
    TCP C0A80A06 AA2C C0A80A64 0050 0A000007 0050 TIME_WAIT        92
    
    #保存规则
    [root@lvs-server ~]#ipvsadm -Sn > /etc/sysconfig/ipvsadm
    [root@lvs-server ~]#systemctl enable --now ipvsadm.service 
    
    
    2、LVS的跨网络DR实现
    #环境准备:
    5台主机
    客户端client       vmnet1仅主机网络 eth0:192.168.33.160/24 GW:192.168.33.200
    路由器router        vmnet1仅主机网络 eth1:192.168.33.200/24
    			     vmnet8 NAT网络 eth0: 10.0.0.200/24  eth0:1: 192.168.0.200/24
    负载调度器LVS      vip:lo 192.168.0.100/32  
    			     DIP:eth0 NAT 10.0.0.150/24 GW:10.0.0.200
    后端web服务器RS1   vip:lo 192.168.0.100/32  
    			     DIP:eth0 NAT 10.0.0.160/24 GW:10.0.0.200
    后端web服务器RS2   vip:lo 192.168.0.100/32  
    			     DIP:eth0 NAT 10.0.0.170/24 GW:10.0.0.200
    			     
    #client:
    [root@client ~]# vim /etc/sysconfig/network-scripts/ifcfg-eth0
    TYPE=Ethernet
    PROXY_METHOD=none
    BROWSER_ONLY=no
    BOOTPROTO=static
    DEFROUTE=yes
    NAME=eth0
    DEVICE=eth0
    ONBOOT=yes
    IPADDR=192.168.33.160
    PREFIX=24
    GATEWAY=192.168.33.200
    
    [root@client ~]# systemctl restart network
    
    #router:
    [root@router ~]# cat /etc/sysconfig/network-scripts/ifcfg-eth0
    TYPE=Ethernet
    BOOTPROTO=static
    DEFROUTE=yes
    NAME=eth0
    DEVICE=eth0
    ONBOOT=yes
    IPADDR=10.0.0.200
    PREFIX=24
    
    [root@router ~]# cat /etc/sysconfig/network-scripts/ifcfg-eth1
    TYPE=Ethernet
    BOOTPROTO=static
    DEFROUTE=yes
    NAME=eth1
    DEVICE=eth1
    ONBOOT=yes
    IPADDR=192.168.33.200
    PREFIX=24
    
    [root@router ~]# systemctl restart network
    #添加临时eth0网卡子接口eth0:1配置ip地址。
    [root@router ~]# ip a add 192.168.0.200/24 dev eth0
    #测试仅主机网络,和客户端正常通信
    [root@router ~]# ping 192.168.33.160
    PING 192.168.33.160 (192.168.33.160) 56(84) bytes of data.
    64 bytes from 192.168.33.160: icmp_seq=1 ttl=64 time=0.490 ms
    64 bytes from 192.168.33.160: icmp_seq=2 ttl=64 time=0.859 ms
    
    #配置开启路由转发功能
    [root@router ~]# echo "net.ipv4.ip_forward=1" >> /etc/sysctl.conf
    [root@router ~]# sysctl -p
    net.ipv4.ip_forward = 1
    [root@router ~]# cat /proc/sys/net/ipv4/ip_forward
    1
    
    #LVS:
    #网络配置
    [root@lvs ~]#vim /etc/sysconfig/network-scripts/ifcfg-eth0
    TYPE=Ethernet
    BOOTPROTO=static
    NAME=eth0
    DEVICE=eth0
    IPADDR=10.0.0.150
    PREFIX=24
    GATEWAY=10.0.0.200
    ONBOOT=yes
    [root@lvs ~]#nmcli c reload
    [root@lvs ~]#nmcli c up eth0
    [root@lvs ~]#route -n
    Kernel IP routing table
    Destination     Gateway         Genmask         Flags Metric Ref    Use Iface
    0.0.0.0         10.0.0.200      0.0.0.0         UG    100    0        0 eth0
    10.0.0.0        0.0.0.0         255.255.255.0   U     100    0        0 eth0
    [root@lvs ~]#ping 192.168.33.160
    PING 192.168.33.160 (192.168.33.160) 56(84) bytes of data.
    64 bytes from 192.168.33.160: icmp_seq=1 ttl=63 time=0.919 ms
    64 bytes from 192.168.33.160: icmp_seq=2 ttl=63 time=4.51 ms
    
    #配置vip
    [root@lvs ~]#ifconfig  lo:1 192.168.0.100 netmask 255.255.255.255
    
    
    
    #RS1:
    #网络配置
    [root@RS1 ~]#vim /etc/sysconfig/network-scripts/ifcfg-eth0
    TYPE=Ethernet
    BOOTPROTO=static
    NAME=eth0
    DEVICE=eth0
    IPADDR=10.0.0.160
    PREFIX=24
    GATEWAY=10.0.0.200
    ONBOOT=yes
    [root@RS1 ~]#nmcli c reload
    [root@RS1 ~]#nmcli c up eth0
    [root@RS1 ~]#route -n
    Kernel IP routing table
    Destination     Gateway         Genmask         Flags Metric Ref    Use Iface
    0.0.0.0         10.0.0.200      0.0.0.0         UG    100    0        0 eth0
    10.0.0.0        0.0.0.0         255.255.255.0   U     100    0        0 eth0
    
    #web服务配置
    [root@RS1 ~]#yum install -y httpd
    [root@RS1 ~]#echo 10.0.0.160 >> /var/www/html/index.html
    [root@RS1 ~]#systemctl restart httpd
    [root@RS1 ~]#curl localhost
    10.0.0.160
    
    #IPVS配置
    [root@RS1 ~]#echo 1 > /proc/sys/net/ipv4/conf/all/arp_ignore
    [root@RS1 ~]#echo 2 > /proc/sys/net/ipv4/conf/all/arp_announce
    [root@RS1 ~]#echo 1 > /proc/sys/net/ipv4/conf/lo/arp_ignore
    [root@RS1 ~]#echo 2 > /proc/sys/net/ipv4/conf/lo/arp_announce
    #配置vip
    [root@RS1 ~]#ifconfig  lo:1 192.168.0.100 netmask 255.255.255.255
    
    
    #RS2:
    #网络配置
    [root@RS2 ~]#vim /etc/sysconfig/network-scripts/ifcfg-eth0
    TYPE=Ethernet
    BOOTPROTO=static
    NAME=eth0
    DEVICE=eth0
    IPADDR=10.0.0.170
    PREFIX=24
    GATEWAY=10.0.0.200
    ONBOOT=yes
    [root@RS2 ~]#nmcli c reload
    [root@RS2 ~]#nmcli c up eth0
    
    #web服务配置
    [root@RS2 ~]#yum install -y httpd
    [root@RS2 ~]#echo 10.0.0.170 >> /var/www/html/index.html
    [root@RS2 ~]#systemctl restart httpd
    [root@RS2 ~]#curl localhost
    10.0.0.170
    
    #IPVS配置
    [root@RS2 ~]#echo 1 > /proc/sys/net/ipv4/conf/all/arp_ignore
    [root@RS2 ~]#echo 2 > /proc/sys/net/ipv4/conf/all/arp_announce
    [root@RS2 ~]#echo 1 > /proc/sys/net/ipv4/conf/lo/arp_ignore
    [root@RS2 ~]#echo 2 > /proc/sys/net/ipv4/conf/lo/arp_announce
    #配置vip
    [root@RS2 ~]#ifconfig  lo:1 192.168.0.100 netmask 255.255.255.255
    
    #LVS集群配置:
    [root@lvs ~]#yum install -y ipvsadm-1.31-1.el8.x86_64.rpm
    [root@lvs ~]#ipvsadm  -A -t 192.168.0.100:80 -s wrr
    [root@lvs ~]#ipvsadm  -a -t 192.168.0.100:80 -r 10.0.0.160 -g -w 1
    [root@lvs ~]#ipvsadm  -a -t 192.168.0.100:80 -r 10.0.0.170 -g -w 1
    
    #测试client:
    [root@client ~]# curl 192.168.0.100
    10.0.0.170
    [root@client ~]# curl 192.168.0.100
    10.0.0.160
    [root@client ~]# curl 192.168.0.100
    10.0.0.170
    [root@client ~]# curl 192.168.0.100
    10.0.0.160
  • 相关阅读:
    Java8 Time
    Java8 Stream
    Java8 Lambda
    Thinking in java 阅读
    String 中的 split 进行字符串分割
    Kubernetes 学习(九)Kubernetes 源码阅读之正式篇------核心组件之 Scheduler
    Kubernetes 学习(八)Kubernetes 源码阅读之初级篇------源码及依赖下载
    Golang(八)go modules 学习
    SQLAIchemy(二)ORM 相关
    SQLAIchemy 学习(一)Session 相关
  • 原文地址:https://www.cnblogs.com/tanll/p/16098761.html
Copyright © 2020-2023  润新知