• Ceph installation issues


    ceph-deploy installation

    Yum priorities plugin
    Before the priorities plugin is installed, yum prints only:
    Loaded plugins: fastestmirror
    After installing it, yum prints:
    Loaded plugins: fastestmirror, priorities
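
    Installing the plugin itself (as in the Ceph preflight docs):
    sudo yum install -y yum-plugin-priorities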


    yum install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm

    cat << EOM > /etc/yum.repos.d/ceph.repo
    [ceph-noarch]
    name=Ceph noarch packages
    baseurl=https://download.ceph.com/rpm-mimic/el7/noarch
    enabled=1
    gpgcheck=1
    type=rpm-md
    gpgkey=https://download.ceph.com/keys/release.asc
    EOM

    yum install ceph-deploy ntp ntpdate ntp-doc -y

    ssh-keygen
    # run the following one at a time (each prompts for that node's password)
    ssh-copy-id ceph1
    ssh-copy-id ceph2
    ssh-copy-id ceph3

    echo "192.168.7.151 ceph1" >> /etc/hosts
    echo "192.168.7.152 ceph2" >> /etc/hosts
    echo "192.168.7.153 ceph3" >> /etc/hosts
    systemctl stop firewalld
    systemctl disable firewalld
    sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
    setenforce 0
    cat /etc/sysconfig/selinux

    ##################################################

    useradd sceph
    passwd sceph

    echo "sceph ALL = (root) NOPASSWD:ALL" | tee /etc/sudoers.d/sceph
    chmod 0440 /etc/sudoers.d/sceph

    chmod u+w /etc/sudoers.d/sceph
    vi /etc/sudoers.d/sceph    # add the following line:
    Defaults:sceph !requiretty
    chmod u-w /etc/sudoers.d/sceph
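
    A non-interactive equivalent of the edit above (visudo -cf just syntax-checks the drop-in file):
    echo 'Defaults:sceph !requiretty' | sudo tee -a /etc/sudoers.d/sceph
    sudo visudo -cf /etc/sudoers.d/sceph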

    wget https://bootstrap.pypa.io/get-pip.py
    python get-pip.py
    pip -V


    Alternatively, just installing this package is enough:
    yum -y install python2-pip


    mkdir my-cluster
    cd my-cluster

    ##################################################
    ceph-deploy new ceph1
    ceph-deploy install ceph1 ceph2 ceph3


    Just these 5 key packages:
    epel-release
    yum-plugin-priorities
    ceph-release
    ceph.x86_64 2:13.2.5-0.el7
    ceph-radosgw.x86_64 2:13.2.5-0.el7
    sudo yum -y install ceph ceph-radosgw

    mount -t ceph 192.168.7.101:6789:/ /mnt/mycephfs1 -o name=admin,secretfile=/etc/ceph/admin.secret

    The secretfile holds only the raw key for client.admin, e.g.:
    AQAY9JJcbtuaExAA2wVIqz6w5KrEiOA1S3JIMA==
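
    A minimal sketch for producing that secret file on the client (assumes the admin keyring is already present there):
    sudo mkdir -p /mnt/mycephfs1
    # write just the bare key, no section headers, into the secret file
    sudo ceph auth get-key client.admin | sudo tee /etc/ceph/admin.secret
    sudo chmod 600 /etc/ceph/admin.secret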

    ceph-deploy mon create-initial
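
    The usual quick-start continuation after this step would be (a sketch; the /dev/sdb data disks are an assumption for these nodes):
    ceph-deploy admin ceph1 ceph2 ceph3          # push ceph.conf + admin keyring
    ceph-deploy mgr create ceph1                 # mgr daemon is required since Luminous
    ceph-deploy osd create --data /dev/sdb ceph1
    ceph-deploy osd create --data /dev/sdb ceph2
    ceph-deploy osd create --data /dev/sdb ceph3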

     =============================================================================

    Using Ceph RBD to provide storage volumes for a Kubernetes cluster:
    the integration still involves its share of pitfalls.
    Third-party cloud provisioning platforms such as OpenStack, CloudStack, OpenNebula, Proxmox, etc.

    ===================================================
    [sceph@ceph1 ~]$ ceph-deploy new ceph1
    [ceph_deploy.conf][DEBUG ] found configuration file at: /home/sceph/.cephdeploy.conf
    [ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy new ceph1
    [ceph_deploy.cli][INFO ] ceph-deploy options:
    [ceph_deploy.cli][INFO ] username : None
    [ceph_deploy.cli][INFO ] func : <function new at 0x7faf668ec320>
    [ceph_deploy.cli][INFO ] verbose : False
    [ceph_deploy.cli][INFO ] overwrite_conf : False
    [ceph_deploy.cli][INFO ] quiet : False
    [ceph_deploy.cli][INFO ] cd_conf : <ceph_deploy.conf.cephdeploy.Conf instance at 0x7faf66061830>
    [ceph_deploy.cli][INFO ] cluster : ceph
    [ceph_deploy.cli][INFO ] ssh_copykey : True
    [ceph_deploy.cli][INFO ] mon : ['ceph1']
    [ceph_deploy.cli][INFO ] public_network : None
    [ceph_deploy.cli][INFO ] ceph_conf : None
    [ceph_deploy.cli][INFO ] cluster_network : None
    [ceph_deploy.cli][INFO ] default_release : False
    [ceph_deploy.cli][INFO ] fsid : None
    [ceph_deploy.new][DEBUG ] Creating new cluster named ceph
    [ceph_deploy.new][INFO ] making sure passwordless SSH succeeds
    [ceph1][DEBUG ] connection detected need for sudo
    [ceph1][DEBUG ] connected to host: ceph1
    [ceph1][DEBUG ] detect platform information from remote host
    [ceph1][DEBUG ] detect machine type
    [ceph1][DEBUG ] find the location of an executable
    [ceph1][INFO ] Running command: sudo /usr/sbin/ip link show
    [ceph1][INFO ] Running command: sudo /usr/sbin/ip addr show
    [ceph1][DEBUG ] IP addresses found: [u'192.168.7.151']
    [ceph_deploy.new][DEBUG ] Resolving host ceph1
    [ceph_deploy.new][DEBUG ] Monitor ceph1 at 192.168.7.151
    [ceph_deploy.new][DEBUG ] Monitor initial members are ['ceph1']
    [ceph_deploy.new][DEBUG ] Monitor addrs are ['192.168.7.151']
    [ceph_deploy.new][DEBUG ] Creating a random mon key...
    [ceph_deploy.new][DEBUG ] Writing monitor keyring to ceph.mon.keyring...
    [ceph_deploy.new][DEBUG ] Writing initial config to ceph.conf...
    [sceph@ceph1 ~]$ ls
    ceph.conf ceph-deploy-ceph.log ceph.mon.keyring my-cluster


    =========================================================

    Problem 1

    [ceph1][DEBUG ] Installed:
    [ceph1][DEBUG ] ceph-release.noarch 0:1-1.el7
    [ceph1][DEBUG ]
    [ceph1][DEBUG ] Complete!
    [ceph1][WARNIN] ensuring that /etc/yum.repos.d/ceph.repo contains a high priority
    [ceph_deploy][ERROR ] RuntimeError: NoSectionError: No section: 'ceph'


    Solution
    Stale ceph.repo.rpmnew / ceph.repo.rpmsave files from an earlier install leave yum without a usable [ceph] section; remove ceph-release and the leftover repo files, then rerun the install:
    yum remove ceph-release
    [root@ceph3 ~]# cd /etc/yum.repos.d/
    [root@ceph3 yum.repos.d]# ll
    total 48
    -rw-r--r--. 1 root root 1664 Nov 23 21:16 CentOS-Base.repo
    -rw-r--r--. 1 root root 1309 Nov 23 21:16 CentOS-CR.repo
    -rw-r--r--. 1 root root 649 Nov 23 21:16 CentOS-Debuginfo.repo
    -rw-r--r--. 1 root root 314 Nov 23 21:16 CentOS-fasttrack.repo
    -rw-r--r--. 1 root root 630 Nov 23 21:16 CentOS-Media.repo
    -rw-r--r--. 1 root root 1331 Nov 23 21:16 CentOS-Sources.repo
    -rw-r--r--. 1 root root 5701 Nov 23 21:16 CentOS-Vault.repo
    -rw-r--r-- 1 root root 535 May 5 2018 ceph.repo.rpmnew
    -rw-r--r--. 1 root root 178 Mar 20 17:34 ceph.repo.rpmsave
    -rw-r--r--. 1 root root 951 Oct 3 2017 epel.repo
    -rw-r--r--. 1 root root 1050 Oct 3 2017 epel-testing.repo
    [root@ceph3 yum.repos.d]# rm -rf ceph.repo.rpm*


    ========================================================

    Problem 2

    [ceph_deploy.install][DEBUG ] Detecting platform for host ceph2 ...
    [ceph2][DEBUG ] connection detected need for sudo

    We trust you have received the usual lecture from the local System
    Administrator. It usually boils down to these three things:

    #1) Respect the privacy of others.
    #2) Think before you type.
    #3) With great power comes great responsibility.

    sudo: no tty present and no askpass program specified
    [ceph_deploy][ERROR ] RuntimeError: connecting to host: ceph2 resulted in errors: IOError cannot send (already closed?)

    Solution
    The deploy user needs passwordless sudo with requiretty disabled on every node:
    useradd sceph
    passwd sceph

    echo "sceph ALL = (root) NOPASSWD:ALL" | tee /etc/sudoers.d/sceph
    chmod 0440 /etc/sudoers.d/sceph

    chmod u+w /etc/sudoers.d/sceph
    vi /etc/sudoers.d/sceph    # add the following line:
    Defaults:sceph !requiretty
    chmod u-w /etc/sudoers.d/sceph


    ========================================================
    Problem 3
    [sceph@ceph1 ~]$ ceph -s
    2019-03-21 17:44:54.373 7fee143b6700 -1 auth: unable to find a keyring on /etc/ceph/ceph.client.admin.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin,: (2) No such file or directory
    2019-03-21 17:44:54.373 7fee143b6700 -1 monclient: ERROR: missing keyring, cannot use cephx for authentication
    [errno 2] error connecting to the cluster

    Solution
    The sceph user cannot read /etc/ceph/ceph.client.admin.keyring, so run the command as root:
    sudo ceph -s
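
    Alternatively, per the quick start, make the keyring readable to non-root users (a looser-security judgment call):
    sudo chmod +r /etc/ceph/ceph.client.admin.keyring
    ceph -s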


    ========================================================
    Problem 4
    [root@ceph1 ceph]# ceph -s
      cluster:
        id:     d45b9e18-518c-4d01-bb41-b341c576e3c0
        health: HEALTH_WARN
                too few PGs per OSD (8 < min 30)

    Solution
    8 PGs
    3 OSDs
    3 replicas

    PG copies per OSD = PGs × replicas / OSDs
    8 × 3 / 3 = 8

    Each OSD ends up holding an even share of 8 × 3 / 3 = 8 PG copies, far below the minimum of 30, so raise pg_num on the pool (see the sketch below).
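
    A hedged sketch of the fix (the pool name rbd is an assumption; pick a power of two that lifts PGs-per-OSD above 30):
    # 64 PGs x 3 replicas / 3 OSDs = 64 PG copies per OSD
    sudo ceph osd pool set rbd pg_num 64
    sudo ceph osd pool set rbd pgp_num 64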


    =================================================
    To browse tuning parameters, see the dashboard under Cluster > Configuration Documentation:
    osd_memory_target
    mon_osd_cache_size
    mds_cache_size

    The cache disk is used for the journal:
    osd_journal=/var/lib/ceph/osd/$cluster-$id/journal
    The data disk is used for the objects themselves.


    bluestore is the current backend.
    filestore is obsolete.
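
    To check which backend an existing OSD is using (osd.0 here is just an example id):
    sudo ceph osd metadata 0 | grep osd_objectstore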

    ========================================================

    yum-plugin-priorities

    A mechanism for prioritizing yum repositories,
    provided by the yum-plugin-priorities plugin.
    It is used to rank yum repositories.
    For example, on CentOS you might have the centos, epel, and rpmfusion repos,
    and all three may carry the same packages, patches, and so on.
    The plugin exists so that yum can decide which repo to use when updating the system or installing software.
    Say the official CentOS repo is given the highest priority, epel second, and rpmfusion third (priorities run from 1 to 99, with 1 highest).
    Then, when installing a program, yum searches the CentOS repo first; if it has the program, the search stops and that copy is installed; if not, yum falls through to epel and then rpmfusion.
    If all three repos carry the same package, the one from the highest-priority repo is installed.
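
    A sketch of what the priority settings look like inside the repo files (values illustrative):
    # /etc/yum.repos.d/CentOS-Base.repo (excerpt)
    [base]
    priority=1
    # /etc/yum.repos.d/epel.repo (excerpt)
    [epel]
    priority=2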

    ========================================================


    Commands like these are what ceph-deploy runs on the monitor during mon create-initial / gatherkeys; they can also be run by hand to inspect mon status and fetch the bootstrap keys:

    sudo ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.ceph1.asok mon_status

    sudo /usr/bin/ceph --connect-timeout=25 --cluster=ceph --admin-daemon=/var/run/ceph/ceph-mon.ceph1.asok mon_status
    sudo /usr/bin/ceph --connect-timeout=25 --cluster=ceph --name mon. --keyring=/var/lib/ceph/mon/ceph-ceph1/keyring auth get client.admin
    sudo /usr/bin/ceph --connect-timeout=25 --cluster=ceph --name mon. --keyring=/var/lib/ceph/mon/ceph-ceph1/keyring auth get client.bootstrap-mds
    sudo /usr/bin/ceph --connect-timeout=25 --cluster=ceph --name mon. --keyring=/var/lib/ceph/mon/ceph-ceph1/keyring auth get client.bootstrap-mgr
    sudo /usr/bin/ceph --connect-timeout=25 --cluster=ceph --name mon. --keyring=/var/lib/ceph/mon/ceph-ceph1/keyring auth get client.bootstrap-osd
    sudo /usr/bin/ceph --connect-timeout=25 --cluster=ceph --name mon. --keyring=/var/lib/ceph/mon/ceph-ceph1/keyring auth get client.bootstrap-rgw

    ========================================================

    Standalone install of Ceph on Debian under PVE

    This did not succeed, due to the PVE/Ceph version coupling: PVE 5.4 corresponds to Ceph Luminous.


    Configuring an APT source:
    deb https://download.ceph.com/debian-nautilus/ stretch main
    deb http://mirrors.163.com/ceph/debian-nautilus/ stretch main
    'deb https://download.ceph.com/debian-luminous/ {codename} main'
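
    For example, writing the 163-mirror nautilus source used above into a source file (a sketch; stretch is the codename here):
    echo "deb http://mirrors.163.com/ceph/debian-nautilus/ stretch main" | sudo tee /etc/apt/sources.list.d/ceph.list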

    wget -q -O- 'https://download.ceph.com/keys/release.asc' | sudo apt-key add -
    wget -q -O- 'http://mirrors.163.com/ceph/keys/release.asc' | sudo apt-key add -
    sudo apt-get update && sudo apt-get install ceph ceph-mds


    wget -O- 'http://mirrors.163.com/ceph/keys/release.asc' | apt-key add -
    apt-get update && apt-get install ceph ceph-mds


    Downloading packages directly
    If you are attempting to install behind a firewall, in an environment without internet access, fetch the .deb packages by hand:

    wget -q https://download.ceph.com/debian-{release}/pool/main/c/ceph/ceph_{version}{distro}_{arch}.deb

    sudo apt-get update && sudo apt-get install ceph-deploy


    apt-get install ceph-mds    # pulls in:
    ceph-mds
    ceph-fs-common


    apt-get install ceph

    The following additional packages will be installed:
    binutils ceph-base ceph-mon ceph-osd cryptsetup-bin javascript-common libjs-jquery libleveldb1v5 libopts25 libparted2 ntp parted python-blinker python-click python-colorama python-flask python-itsdangerous python-jinja2
    python-markupsafe python-pyinotify python-simplejson python-werkzeug uuid-runtime xfsprogs
    Suggested packages:
    binutils-doc ceph-mds apache2 | lighttpd | httpd libparted-dev libparted-i18n ntp-doc parted-doc python-blinker-doc python-flask-doc python-jinja2-doc python-pyinotify-doc ipython python-genshi python-lxml python-greenlet
    python-redis python-pylibmc | python-memcache python-werkzeug-doc xfsdump acl quota
    The following NEW packages will be installed:
    binutils ceph ceph-base ceph-mon ceph-osd cryptsetup-bin javascript-common libjs-jquery libleveldb1v5 libopts25 libparted2 ntp parted python-blinker python-click python-colorama python-flask python-itsdangerous python-jinja2
    python-markupsafe python-pyinotify python-simplejson python-werkzeug uuid-runtime xfsprogs
    0 upgraded, 25 newly installed, 0 to remove and 8 not upgraded.
    Need to get 28.4 MB of archives.
    After this operation, 115 MB of additional disk space will be used.


    Note the version that was actually installed: 10.2.11 is Jewel, pulled from Debian's own archive rather than the configured Ceph repo.

    root@d1:/etc/apt# dpkg -l|grep ceph
    ii ceph 10.2.11-2 amd64 distributed storage and file system
    ii ceph-base 10.2.11-2 amd64 common ceph daemon libraries and management tools
    ii ceph-common 10.2.11-2 amd64 common utilities to mount and interact with a ceph storage cluster
    ii ceph-fs-common 10.2.11-2 amd64 common utilities to mount and interact with a ceph file system
    ii ceph-fuse 10.2.11-2 amd64 FUSE-based client for the Ceph distributed file system
    ii ceph-mds 10.2.11-2 amd64 metadata server for the ceph distributed file system
    ii ceph-mon 10.2.11-2 amd64 monitor server for the ceph storage system
    ii ceph-osd 10.2.11-2 amd64 OSD server for the ceph storage system
    ii libcephfs1 10.2.11-2 amd64 Ceph distributed file system client library
    ii python-cephfs 10.2.11-2 amd64 Python libraries for the Ceph libcephfs library


    root@d1:~# dpkg -l|wc -l
    595
    root@d1:~# dpkg -l|grep ceph
    ii ceph-common 10.2.11-2 amd64 common utilities to mount and interact with a ceph storage cluster
    ii ceph-fuse 10.2.11-2 amd64 FUSE-based client for the Ceph distributed file system
    ii libcephfs1 10.2.11-2 amd64 Ceph distributed file system client library
    ii python-cephfs 10.2.11-2 amd64 Python libraries for the Ceph libcephfs library

    visudo    # grant the c1 user sudo:
    c1 ALL=(ALL) ALL

    sudo vi /etc/ceph/ceph.conf
    uuidgen

    ceph-authtool --create-keyring /tmp/ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *'
    sudo ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring --gen-key -n client.admin --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow *' --cap mgr 'allow *'
    sudo ceph-authtool --create-keyring /var/lib/ceph/bootstrap-osd/ceph.keyring --gen-key -n client.bootstrap-osd --cap mon 'profile bootstrap-osd'
    sudo ceph-authtool /tmp/ceph.mon.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring
    sudo ceph-authtool /tmp/ceph.mon.keyring --import-keyring /var/lib/ceph/bootstrap-osd/ceph.keyring
    monmaptool --create --add d1 192.168.8.2 --fsid 0f0baa68-0787-4b32-a3c3-e46b6e50c5f6 /tmp/monmap
    sudo mkdir /var/lib/ceph/mon/ceph-d1
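    # (hedged addition per the manual-deployment docs: the keyring and the mon
    #  data dir may need to be owned by the ceph user before --mkfs will succeed)
    sudo chown ceph:ceph /tmp/ceph.mon.keyring
    sudo chown -R ceph:ceph /var/lib/ceph/mon/ceph-d1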
    sudo -u ceph ceph-mon --mkfs -i d1 --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring


    [global]
    fsid = 0f0baa68-0787-4b32-a3c3-e46b6e50c5f6
    mon initial members = d1
    mon host = 192.168.8.2
    public network = 192.168.8.0/24
    auth cluster required = cephx
    auth service required = cephx
    auth client required = cephx
    osd journal size = 1024
    osd pool default size = 3
    osd pool default min size = 2
    osd pool default pg num = 333
    osd pool default pgp num = 333
    osd crush chooseleaf type = 1

    sudo systemctl start ceph-mon@d1
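
    Probably also worth enabling the unit so the mon survives a reboot (an addition, not in the original notes):
    sudo systemctl enable ceph-mon@d1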

    sudo /etc/init.d/ceph start mon.d1    # sysvinit alternative on older setups

    ceph -s
