• Online RAID commands (MegaCli)


    # Install the RAID controller management tool (MegaCli)
    wget http://10.12.30.102:10800/other/MegaCli-8.07.14-1.noarch.rpm -O /tmp/MegaCli-8.07.14-1.noarch.rpm
    rpm -ivh /tmp/MegaCli-8.07.14-1.noarch.rpm
    ln -sf /opt/MegaRAID/MegaCli/MegaCli64 /usr/local/bin/megacli
    
    # List the RAID adapters (one 'Adapter #N' line per controller)
    megacli -AdpAllInfo -aALL | grep 'Adapter #'
    
    # Count the RAID adapters
    megacli -AdpAllInfo -aALL | grep 'Adapter #' | wc -l
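    
    # With more than one adapter, the per-adapter commands below can be repeated for each
    # index. A minimal sketch, deriving the adapter indexes from the count above:
    for adp in $(seq 0 $(($(megacli -AdpAllInfo -aALL | grep -c 'Adapter #') - 1))); do megacli -LDInfo -LALL -a${adp}; done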
    
    # Get the Enclosure Device ID (needed below). With multiple RAID controllers, first confirm which controller the target physical disks sit behind and use that controller's enclosure ID; disks on different controllers cannot be placed in the same group.
    megacli -cfgdsply -aALL | grep 'Enclosure Device ID'
    
    # View RAID (virtual drive) status
    megacli -LDInfo -LALL -a0
    
    # List all physical disks
    megacli -PDList -a0
    megacli -PDList -a0 | grep 'Slot Number:' | awk '{print $3}'
    
    # Check disks for media errors
    megacli -PDList -a0 | grep 'Media Error Count:' | nl
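    
    # To see which slot each error count belongs to, print the two fields together
    # (a minimal sketch; PDList prints Slot Number before Media Error Count for each disk):
    megacli -PDList -a0 | egrep 'Slot Number|Media Error Count'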
    
    # View details of a single disk (format: -PhysDrv[Enclosure:Slot])
    megacli -pdInfo -PhysDrv[32:3] -a0
    
    # Find which disks are not part of any RAID group
    for id in $(megacli -PDList -a0 | grep 'Slot Number:' | awk '{print $3}');do megacli -pdInfo -PhysDrv[$(megacli -cfgdsply -aALL | grep 'Enclosure Device ID' | awk '{print $4}' | head -n1):${id}] -a0|head -n4;done
    
    # Get the Target IDs of all RAID groups (virtual drives)
    megacli -LDInfo -LALL -a0 | grep 'Target Id' | awk '{print $3}'
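    
    # Quick per-group health check using the Target IDs collected above (a minimal sketch):
    for tid in $(megacli -LDInfo -LALL -a0 | grep 'Target Id' | awk '{print $3}'); do megacli -LDInfo -L${tid} -a0 | grep 'State'; done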
    
    # Confirm the system disks are present (slots 12 and 13 in this example); exit if both do not report a capacity
    [[ $(megacli -PDList -a0 | grep -E -A 12 'Slot Number: 12|Slot Number: 13' | grep -c 'GB') -eq 2 ]] || exit 1
    
    
    
    # Show enclosure ID, slot number, and RAID position for every physical disk
    for id in $(megacli -PDList -a0 | grep 'Slot Number:' | awk '{print $3}');do megacli -pdInfo -PhysDrv[$(megacli -cfgdsply -a0 | grep 'Enclosure Device ID' | head -n1 | awk '{print $4}'):${id}] -a0 | egrep "Enclosure Device ID|Slot Number|Drive's position";done
    
    
    
    
    # Create RAID10 online
    megacli -CfgSpanAdd -r10 -Array0[252:4,252:5] -Array1[252:6,252:7] -Array2[252:8,252:9] -Array3[252:10,252:11] -Array4[252:12,252:13] -Array5[252:14,252:15] WB Direct -a0
    
    megacli -CfgSpanAdd -r10 -Array0[252:0,252:1] -Array1[252:2,252:3] -Array2[252:4,252:5] -Array3[252:6,252:7] WB Direct -a0
    
    # Create a single-disk RAID0 online
    megacli -CfgLdAdd -r0[32:1] WB Direct -a0
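    
    # A hedged sketch for putting every remaining Unconfigured(good) disk into its own
    # single-disk RAID0 (assumes enclosure ID 32, as in the examples here):
    for slot in $(megacli -PDList -a0 | grep 'Slot Number:' | awk '{print $3}'); do
        megacli -pdInfo -PhysDrv[32:${slot}] -a0 | grep -q 'Unconfigured(good)' && megacli -CfgLdAdd -r0[32:${slot}] WB Direct -a0
    done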
    
    
    # Create RAID5 online across 12 disks (enclosure 32, slots 0-11)
    megacli -CfgLdAdd -r5[32:0,32:1,32:2,32:3,32:4,32:5,32:6,32:7,32:8,32:9,32:10,32:11] WB Direct -a0
    
    # Check background initialization progress
    megacli -LDBI -ShowProg -LALL -a0
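    
    # To poll the progress periodically (a minimal sketch):
    watch -n 60 "megacli -LDBI -ShowProg -LALL -a0"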
    
    # Delete a RAID (virtual drive) by Target ID
    megacli -CfgLdDel -L1 -a0
    
    megacli -CfgLdDel -L2 -a0
    
    # Check mount status of the TB-scale filesystems
    df -hT | awk '{if($3~/T$/)print}'
    
    
    
    # Delete all RAID0 virtual drives (Primary-0); in this layout each one is a single-disk RAID0
    for vd in $(megacli -LDInfo -LALL -a0 | grep -B 2 'Primary-0' | grep 'Virtual Drive' | awk '{print $3}');do megacli -CfgLdDel -L${vd} -a0;done
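    
    # Since this deletes data, it may be worth echoing the commands first and eyeballing them (a minimal sketch):
    for vd in $(megacli -LDInfo -LALL -a0 | grep -B 2 'Primary-0' | grep 'Virtual Drive' | awk '{print $3}');do echo megacli -CfgLdDel -L${vd} -a0;done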
    
    # Install XFS userspace tools
    yum -y install xfsprogs xfsdump xfsprogs-devel
    
    # Partition, format, and mount a data disk (example: sdc)
    DiskName=sdc
    parted -s /dev/${DiskName} mklabel gpt
    parted -s /dev/${DiskName} mkpart xfs 0% 100%
    mkfs.xfs -f -i size=512 -l size=128m,lazy-count=1 -d agcount=64 /dev/${DiskName}1
    
    mkdir -p /home/service/var/data
    mount -o noatime,nodiratime,inode64,nobarrier /dev/${DiskName}1 /home/service/var/data
    echo "/dev/${DiskName}1 /home/service/var/data  xfs defaults,noatime,nodiratime,inode64,nobarrier   0   0" >> /etc/fstab
    df -hT
    
    
    # Alternative: mount the same partition at /data instead
    mkdir -p /data
    mount -o noatime,nodiratime,inode64,nobarrier /dev/${DiskName}1 /data
    echo "/dev/${DiskName}1 /data  xfs defaults,noatime,nodiratime,inode64,nobarrier   0   0" >> /etc/fstab
    df -hT
    cat /etc/fstab
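    
    # After editing fstab it is worth round-tripping the mount before the next reboot (a minimal sketch):
    umount /data && mount -a && findmnt /data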
    
    
    # Label and partition a single disk (example: sdp); 0% lets parted align the partition start
    parted -s /dev/sdp mklabel gpt
    parted -s /dev/sdp mkpart xfs 0% 100%
    
    
    # GPT-label every non-system disk (everything except sda)
    for DiskName in $(lsblk | grep disk | grep -v sda | awk '{print $1}' | sort); do parted -s /dev/${DiskName} mklabel gpt;done
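    
    # Extending that loop into a full provisioning pass is sketched below; the /data/<disk>
    # mount-point layout is an assumption to adapt, and sda is taken to be the system disk:
    for DiskName in $(lsblk | grep disk | grep -v sda | awk '{print $1}' | sort); do
        parted -s /dev/${DiskName} mklabel gpt
        parted -s /dev/${DiskName} mkpart xfs 0% 100%
        mkfs.xfs -f -i size=512 -l size=128m,lazy-count=1 -d agcount=64 /dev/${DiskName}1
        mkdir -p /data/${DiskName}
        mount -o noatime,nodiratime,inode64,nobarrier /dev/${DiskName}1 /data/${DiskName}
        echo "/dev/${DiskName}1 /data/${DiskName} xfs defaults,noatime,nodiratime,inode64,nobarrier 0 0" >> /etc/fstab
    done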
    
    
    # Split one large disk into two partitions: the first 12TB for docker, the rest for data
    parted -s /dev/sdb mklabel gpt
    parted -s /dev/sdb mkpart xfs 0% 12TB
    parted -s /dev/sdb mkpart xfs 12TB 100%
    
    # Format the first partition as ext4 tuned for large files, the second as XFS
    mkfs.ext4 -T largefile /dev/sdq1
    
    mkfs.xfs -f -i size=512 -l size=128m,lazy-count=1 -d agcount=64 /dev/sdb2
    
    
    # Mount the docker and data partitions, then restore docker's files from the backup copy
    mount /dev/sdb1 /var/lib/docker
    mount -o noatime,nodiratime,inode64,nobarrier /dev/sdb2 /data
    
    cp -av /var/lib/docker_bak/* /var/lib/docker/
    
    # Corresponding /etc/fstab entries:
    /dev/sdb1 /var/lib/docker ext4 defaults 0 0
    /dev/sdb2 /home/service/var/data xfs defaults 0 0
    
    
    # Error seen when creating a config while the controller still holds preserved cache:
    Adapter 0: Configure Adapter Failed
    FW error description:
    The current operation is not allowed because the controller has data in cache for offline or missing virtual drives.
    Exit Code: 0x54
    # Fix: list the preserved-cache entries, then discard the cache of the affected VD
    megacli -GetPreservedCacheList -a0
    megacli -DiscardPreservedCache -L2 -a0
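    
    # If several VDs hold preserved cache, they can be discarded in one pass. A hedged sketch,
    # assuming the list output contains lines like "Virtual Drive(Target ID 02): Missing.":
    for tid in $(megacli -GetPreservedCacheList -a0 | grep -oE 'Target ID [0-9]+' | awk '{print $3+0}'); do megacli -DiscardPreservedCache -L${tid} -a0; done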
    
    
    # Force a disk's state back to Unconfigured(good), e.g. after it was marked bad
    megacli -PDMakeGood -PhysDrv[32:15] -force -a0
    
    
    <1> Add a dedicated hot spare (Array0 means RAID group 0)
    
    megacli -PDHSP -Set -Dedicated -Array0 -physdrv[32:4] -a0
    
    <2> Add a global hot spare
    
    megacli -pdhsp -set -physdrv[32:4] -a0
    
    <3> Remove a hot spare (works for both global and dedicated)
    
    megacli -pdhsp -rmv -physdrv[32:4] -a0
    
    
    
    
    
    
    ================
    
    【Symptom】
    
    After a disk replacement, the newly inserted disk cannot be put into a RAID0, so FusionStorage Block fails to recognize it.
    
    
    
    【Problem details and approach】
    
    After FusionStorage Block was deployed, a new disk was inserted into a storage node.
    
    Attempting to create a RAID0 for it with megacli -CfgLdAdd -r0[32:15] WB Direct -a0 fails with the following error:
    
    The specified physical disk does not have the appropriate attributes to complete the requested command.
    Exit Code: 0x26
    
    【Standard fix】
    1. Scan for foreign configurations:
    # megacli -cfgforeign -scan -a0
    There are 1 foreign configuration(s) on controller 0.
    Exit Code: 0x00
    2. Clear the foreign configuration:
    # megacli -cfgforeign -clear -a0
    Foreign configuration 0 is cleared on controller 0.
    Exit Code: 0x00
    3. Scan again: the foreign configuration is now gone and the disk can join a RAID
    # megacli -cfgforeign -scan -a0
    There is no foreign configuration on controller 0.
    Exit Code: 0x00
    【Special case】
    In some situations (for example, when the disk was moved (***) from a server with a 3008 RAID card into one with a 3108 RAID card), the scan finds no foreign configuration, yet the same error occurs.
    This is caused by an abnormal disk Firmware state: running megacli -PDList -a0 | grep 'Firmware state' on the storage node shows the disk as Unconfigured(bad). (Screenshot not captured.)
    Steps to fix:
    1. Run megacli -PDMakeGood -PhysDrv[32:7] -a0 to change the disk state to Unconfigured(good)
    2. Re-run the RAID creation command and restart smio; problem solved
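    
    # The whole special-case recovery in one place (a minimal sketch; enclosure 32 and slot 7 are the example values from above):
    megacli -PDList -a0 | grep 'Firmware state'    # the disk shows Unconfigured(bad)
    megacli -PDMakeGood -PhysDrv[32:7] -a0         # force it back to Unconfigured(good)
    megacli -CfgLdAdd -r0[32:7] WB Direct -a0      # recreate the single-disk RAID0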
    
    
    
    
    
    
    
    
    Command notes:
    CfgSpanAdd: creates a spanned RAID array
    -r10: create RAID10 (use -r5 for RAID5)
    -Array0[252:4,252:5]: RAID10 is RAID1+RAID0. Here the physical disks in slots 4 and 5 form one RAID1 mirror, the disks in slots 6 and 7 form another, and the mirrors are then striped together as a RAID0 (252 is the Enclosure Device ID value).
    WB Direct: WriteBack write policy with Direct (non-cached) IO.
    With more disks, continue the pattern with -Array2, -Array3, and so on.
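    
    A sketch of building the -ArrayN arguments from an even-length slot list, so the pattern scales without hand-writing each pair (ENC and SLOTS here are assumptions to adapt):
    
    ENC=252                # Enclosure Device ID
    SLOTS=(4 5 6 7 8 9)    # two consecutive slots form one RAID1 span
    ARGS=""
    n=0
    for ((i=0; i<${#SLOTS[@]}; i+=2)); do
        ARGS="${ARGS} -Array${n}[${ENC}:${SLOTS[i]},${ENC}:${SLOTS[i+1]}]"
        n=$((n+1))
    done
    megacli -CfgSpanAdd -r10 ${ARGS} WB Direct -a0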
    
    
    # Discard preserved cache for VD 12 (another example)
    megacli -DiscardPreservedCache -L12 -a0
    
    
    
    1. View virtual drive information on the RAID card
    megacli -LDInfo -Lall -aALL 
    
    [root@ robin]# megacli -LDInfo -Lall -aALL 
    OSSpecificInitialize: Failed to load libsysfs.so.2.0.2 Please ensure that libsfs is present in the system.
    The dependent library libsysfs.so.2.0.1 not available. Please contact LSI for distribution of the package
    
    Adapter 0 -- Virtual Drive Information:
    Virtual Drive: 0 (Target Id: 0)
    Name                :
    RAID Level          : Primary-1, Secondary-0, RAID Level Qualifier-0
    Size                : 930.5 GB
    Mirror Data         : 930.5 GB
    State               : Optimal  <<<< this is the healthy state.
    
    Strip Size          : 64 KB
    Number Of Drives per span:2  <<<< each span contains 2 disks (here the array is actually 4 x 500GB SSDs in RAID10).
    
    Span Depth          : 2 <<<< a depth of 1 would mean plain RAID1; 2 indicates RAID10.
    
    Default Cache Policy: WriteBack, ReadAdaptive, Direct, No Write Cache if Bad BBU
    Current Cache Policy: WriteBack, ReadAdaptive, Direct, No Write Cache if Bad BBU
    Default Access Policy: Read/Write
    Current Access Policy: Read/Write
    Disk Cache Policy   : Disk's Default
    Ongoing Progresses:
      Background Initialization: Completed 41%, Taken 190 min.
    Encryption Type     : None
    Default Power Savings Policy: Controller Defined
    Current Power Savings Policy: None
    Can spin up in 1 minute: No
    LD has drives that support T10 power conditions: No
    LD's IO profile supports MAX power savings with cached writes: No
    Bad Blocks Exist: No
    Is VD Cached: No
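    
    # To pull just the health-relevant fields out of this output (a minimal sketch):
    megacli -LDInfo -LALL -aALL | egrep 'Virtual Drive|RAID Level|State|Bad Blocks'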
• Original source: https://www.cnblogs.com/lwhctv/p/11676142.html