    Ceph cluster rbd-mirror zone A/B backup implementation plan

    Note: two clusters must be prepared first and their status confirmed; the cluster preparation process is not covered here.
    1. Check the cluster status
    Zone A
    [root@ceph2111 ceph]# ceph -s
        cluster 05440e8c-bbd9-4dca-b4e7-c0c88b3fa270
         health HEALTH_OK
         monmap e5: 3 mons at {ceph2111=10.10.2.111:6789/0,ceph2112=10.10.2.112:6789/0,ceph2113=10.10.2.113:6789/0}
                election epoch 44, quorum 0,1,2 ceph2111,ceph2112,ceph2113
         osdmap e63: 4 osds: 3 up, 3 in
                flags sortbitwise
          pgmap v4121: 64 pgs, 1 pools, 557 bytes data, 14 objects
                111 MB used, 45935 MB / 46046 MB avail
                      64 active+clean
    [root@ceph2111 ceph]# 
    
    Zone B
    
    [root@ceph111 ceph]# ceph -s
        cluster e091a437-85a5-4561-b445-ef67ea98cfb2
         health HEALTH_OK
         monmap e1: 3 mons at {ceph111=10.10.1.111:6789/0,ceph112=10.10.1.112:6789/0,ceph115=10.10.1.115:6789/0}
                election epoch 18, quorum 0,1,2 ceph111,ceph112,ceph115
         osdmap e40: 6 osds: 6 up, 6 in
                flags sortbitwise
          pgmap v293: 64 pgs, 1 pools, 555 bytes data, 14 objects
                205 MB used, 30448 MB / 30653 MB avail
                      64 active+clean
      client io 0 B/s rd, 0 op/s rd, 0 op/s wr
    
    2. Install the rbd-mirror RPM package on both clusters
    
    yum install rbd-mirror
    
    3. Start the rbd-mirror daemon on both the A and B sides
    
    [root@ceph2111 ceph]# rbd-mirror -m 10.10.2.111 -d
    2016-10-27 12:17:28.140639 7f788b294c40  0 ceph version 10.2.3 (ecc23778eb545d8dd55e2e4735b53cc93f92e65b), process rbd-mirror, pid 1867
    
    Here nohup is used to run it in the background.
    Zone A
    [root@ceph2111 ceph]# nohup rbd-mirror -m 10.10.2.111 -d > /dev/null 2>&1 &
    [1] 1953
    
    Zone B
    [root@ceph111 ~]# nohup rbd-mirror -m 10.10.1.111 -d > /dev/null 2>&1 &
    [1] 15408
    
    
    Confirm that it is running:
    
    [root@ceph2111 ceph]# ps -ef|grep rbd
    root      1953 30723  0 12:19 pts/0    00:00:00 rbd-mirror -m 10.10.2.111 -d
    root      1984 30723  0 12:19 pts/0    00:00:00 grep --color=auto rbd
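
    On systemd-based distributions the rbd-mirror package normally installs a ceph-rbd-mirror@.service unit; assuming your build ships it (check with systemctl list-unit-files), running the daemon under systemd is more robust than nohup. The instance name maps to the CephX client, here client.admin:

    systemctl enable ceph-rbd-mirror@admin
    systemctl start ceph-rbd-mirror@admin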
    
    
    4. Add rbd_default_features = 125 to ceph.conf. 125 is the sum of the feature bits layering (1) + exclusive-lock (4) + object-map (8) + fast-diff (16) + deep-flatten (32) + journaling (64); rbd-mirror requires exclusive-lock and journaling on mirrored images.
    
    Zone A
    [root@ceph2111 ceph]# vi ceph.conf 
    [global]
    fsid = 05440e8c-bbd9-4dca-b4e7-c0c88b3fa270
    mon_initial_members = ceph2111, ceph2112, ceph2113
    mon_host = 10.10.2.111,10.10.2.112,10.10.2.113
    auth_cluster_required = cephx
    auth_service_required = cephx
    auth_client_required = cephx
    osd pool default size = 2
    public network = 10.10.2.0/24
    rbd_default_features = 125
    
    Zone B
    [root@ceph111 my_cluster]# vi ceph.conf 
    [global]
    fsid = e091a437-85a5-4561-b445-ef67ea98cfb2
    mon_initial_members = ceph111, ceph112, ceph115
    mon_host = 10.10.1.111,10.10.1.112,10.10.1.115
    auth_cluster_required = cephx
    auth_service_required = cephx
    auth_client_required = cephx
    osd pool default size = 2
    public network = 10.10.1.0/24
    rbd_default_features = 125
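
    Note that rbd_default_features only applies to images created after the change. For a pre-existing image, the two features rbd-mirror needs can be enabled per image (a sketch; the image name myimage is hypothetical):

    rbd feature enable rbd/myimage exclusive-lock
    rbd feature enable rbd/myimage journaling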
    
    
    
    5. Configure zone A. When invoked with --cluster NAME, the ceph and rbd tools read /etc/ceph/NAME.conf and /etc/ceph/NAME.client.admin.keyring, so the configuration and keyring are duplicated under the cluster names local (zone A) and remote (zone B):
    
    [root@ceph2111 ceph]# cp ceph.conf local.conf
    [root@ceph2111 ceph]# cp ceph.client.admin.keyring local.client.admin.keyring
    
    Copy the zone A files to the zone B environment:
    
    [root@ceph2111 ceph]# scp local.conf local.client.admin.keyring root@10.10.1.111:/etc/ceph/
    local.conf                                                                                                                              100%  323     0.3KB/s   00:00    
    local.client.admin.keyring  
       
    Synchronize the configuration across cluster A:
    
    [root@ceph2111 ceph]# ceph-deploy --overwrite-conf admin ceph{2111..2114}
    
    Verify that the setup above is complete:
    
    Zone A
    [root@ceph2111 ceph]# ceph --cluster local mon stat
    e5: 3 mons at {ceph2111=10.10.2.111:6789/0,ceph2112=10.10.2.112:6789/0,ceph2113=10.10.2.113:6789/0}, election epoch 44, quorum 0,1,2 ceph2111,ceph2112,ceph2113
    [root@ceph2111 ceph]# ceph --cluster remote mon stat
    e1: 3 mons at {ceph111=10.10.1.111:6789/0,ceph112=10.10.1.112:6789/0,ceph115=10.10.1.115:6789/0}, election epoch 18, quorum 0,1,2 ceph111,ceph112,ceph115
    
    Zone B
    
    [root@ceph111 ceph]# cp ceph.conf remote.conf
    [root@ceph111 ceph]# cp ceph.client.admin.keyring remote.client.admin.keyring
    [root@ceph111 ceph]# ls
    ceph.client.admin.keyring  ceph.conf  local.client.admin.keyring  local.conf  rbdmap  remote.client.admin.keyring  remote.conf  tmpr_jrwC  tmptBCcx2
    [root@ceph111 ceph]# scp remote.c* root@10.10.2.111:/etc/ceph/
    remote.client.admin.keyring                                                                                                             100%  129     0.1KB/s   00:00    
    remote.conf     
    
    [root@ceph111 ceph]# ceph --cluster local mon stat
    e5: 3 mons at {ceph2111=10.10.2.111:6789/0,ceph2112=10.10.2.112:6789/0,ceph2113=10.10.2.113:6789/0}, election epoch 44, quorum 0,1,2 ceph2111,ceph2112,ceph2113
    [root@ceph111 ceph]# ceph --cluster remote mon stat
    e1: 3 mons at {ceph111=10.10.1.111:6789/0,ceph112=10.10.1.112:6789/0,ceph115=10.10.1.115:6789/0}, election epoch 18, quorum 0,1,2 ceph111,ceph112,ceph115
    
    
    
    
    6. The preparation is done; now configure mirroring.
    Zone A
    [root@ceph2111 ceph]# rbd --cluster local mirror pool enable rbd pool
    [root@ceph2111 ceph]# rbd --cluster remote mirror pool enable rbd pool
    
    To disable it later, run:
    
    rbd --cluster local mirror pool disable rbd
    rbd --cluster remote mirror pool disable rbd
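
    Alternatively, mirroring can be enabled per image instead of pool-wide: put the pool into image mode and enable each image explicitly (a sketch; the image name myimage is hypothetical):

    rbd --cluster local mirror pool enable rbd image
    rbd --cluster local mirror image enable rbd/myimage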
    
    
    
    Add a CLUSTER PEER
    Format:
    rbd mirror pool peer add {pool-name} {client-name}@{cluster-name}
    The admin account is sufficient for this.
    
    Run:
    Zone A
    
    [root@ceph2111 ceph]# rbd --cluster local mirror pool peer add rbd client.admin@remote
    d79c69c5-5fac-4bcb-8e93-28a18994d637
    [root@ceph2111 ceph]# rbd --cluster remote mirror pool peer add rbd client.admin@local
    9dd66d08-89e4-4c91-a389-649b9c6940de
    
    7. Check whether the peers are connected
    Zone A
    
    [root@ceph2111 ceph]# rbd --cluster local mirror pool info
    Mode: pool
    Peers: 
      UUID                                 NAME   CLIENT       
      d79c69c5-5fac-4bcb-8e93-28a18994d637 remote client.admin 
    [root@ceph2111 ceph]# rbd --cluster remote mirror pool info
    Mode: pool
    Peers: 
      UUID                                 NAME  CLIENT       
      9dd66d08-89e4-4c91-a389-649b9c6940de local client.admin 
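
    Overall replication health can also be queried with the status subcommand (the exact output shape varies by version):

    rbd --cluster local mirror pool status --verbose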
    
    To remove a peer:
    rbd mirror pool peer remove {pool-name} {peer-uuid}
    
    Run, substituting the pool name and the peer UUID reported by mirror pool info:
    rbd --cluster local mirror pool peer remove rbd {peer-uuid}
    rbd --cluster remote mirror pool peer remove rbd {peer-uuid}
    
    8. Verify that mirroring works
    
    Zone A
    [root@ceph2111 ceph]# rbd create test2111 --size 400
    
    Pool-level mirroring is already enabled (step 6), so the rbd-mirror daemons replicate the new image automatically. After a short wait, both images are visible:
    [root@ceph2111 ceph]# rbd ls
    test111
    test2111
    [root@ceph2111 ceph]# 
    
    Zone B
    [root@ceph111 ceph]# rbd create test111 --size 400
    [root@ceph111 ceph]# rbd ls

    After the rbd-mirror daemons have synced the pool:

    [root@ceph111 ceph]# rbd ls
    test111
    test2111
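
    Per-image replication state can be checked from either side, e.g. for test2111 on the remote cluster (the output fields vary by version):

    rbd --cluster remote mirror image status rbd/test2111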
    
    
    
    9. Note: image demotion and promotion. Demote turns the primary image into a replica; promote does the reverse. Promoting an image that is already primary fails, as the second promote below shows:
    
    ceph]# rbd --cluster local mirror image demote rbd/test111
    ceph]# rbd --cluster local mirror image promote rbd/test111
    ceph]# rbd --cluster local mirror image promote rbd/test111
    rbd: error promoting image to primary
    2016-03-30 23:35:13.477096 7ffa50a3dc00 -1 librbd: image is already primary
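
    A typical failover moves the primary role between the clusters: demote on the current primary, then promote on the peer. If the primary cluster is lost, the surviving side can force-promote, and the stale copy is resynchronized once the failed cluster returns (a sketch using test111 from above):

    # planned failover: demote on zone A, then promote on zone B
    rbd --cluster local mirror image demote rbd/test111
    rbd --cluster remote mirror image promote rbd/test111

    # unplanned failover: zone A is down, force-promote on zone B
    rbd --cluster remote mirror image promote --force rbd/test111

    # once zone A is back, resync its stale copy from the new primary
    rbd --cluster local mirror image resync rbd/test111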