How to install Ceph Nautilus on CentOS 7

2020-01-02 15:45:04
Prepare

System: CentOS 7.5
IP: 192.168.1.129-132
Hostname: admin, node1-node3
Disk: /dev/sdb 100G
Role: admin, osd
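
  • map hostnames and set up passwordless SSH for ceph-deploy

    A minimal sketch run on the admin node. admin is 192.168.1.129 (the address the chrony step below points at); node1-node3 are assumed to take .130-.132 in order:

    echo "192.168.1.129 admin" >> /etc/hosts
    echo "192.168.1.130 node1" >> /etc/hosts
    echo "192.168.1.131 node2" >> /etc/hosts
    echo "192.168.1.132 node3" >> /etc/hosts

    # generate a key and push it to every node so ceph-deploy can log in without a password
    ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa
    for host in admin node1 node2 node3; do ssh-copy-id root@$host; done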

  • replace the default yum repos with the Aliyun mirror

    mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.backup
    wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
    sed -i -e '/mirrors.cloud.aliyuncs.com/d' -e '/mirrors.aliyuncs.com/d' /etc/yum.repos.d/CentOS-Base.repo
    yum clean all && yum makecache && yum update -y
  • add the Ceph and EPEL repos, then turn off firewalld (or open the Ceph ports instead; see the sketch after this block)

    rpm -Uvh https://mirrors.aliyun.com/ceph/rpm-nautilus/el7/noarch/ceph-release-1-1.el7.noarch.rpm
    yum install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm

    systemctl stop firewalld
    systemctl disable firewalld
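
    If you would rather keep firewalld running, the standard Ceph ports can be opened instead; a sketch (mon 6789/tcp, OSD and mgr range 6800-7300/tcp, dashboard 8443/tcp):

    firewall-cmd --permanent --add-port=6789/tcp
    firewall-cmd --permanent --add-port=6800-7300/tcp
    firewall-cmd --permanent --add-port=8443/tcp
    firewall-cmd --reload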
  • make sure time is in sync: install chrony on every node (a quick check follows the block)

    yum install -y chrony

    # admin node: sync with the Aliyun NTP server and allow the cluster subnet
    sudo sed -i '/^server .*/d' /etc/chrony.conf
    sudo sed -ri '3s#^$#server ntp1.aliyun.com iburst\nallow 192.168.1.0/24#' /etc/chrony.conf
    sudo systemctl enable chronyd
    sudo systemctl restart chronyd

    # other nodes: sync with the admin node (192.168.1.129)
    ssh node1 "sudo sed -i '/^server .*/d' /etc/chrony.conf"
    ssh node1 "sudo sed -ri '3s#^\$#server 192.168.1.129 iburst#' /etc/chrony.conf"
    ssh node2 "sudo sed -i '/^server .*/d' /etc/chrony.conf"
    ssh node2 "sudo sed -ri '3s#^\$#server 192.168.1.129 iburst#' /etc/chrony.conf"
    ssh node3 "sudo sed -i '/^server .*/d' /etc/chrony.conf"
    ssh node3 "sudo sed -ri '3s#^\$#server 192.168.1.129 iburst#' /etc/chrony.conf"

    # restart the chrony service on every node
    ssh node1 sudo systemctl enable chronyd
    ssh node1 sudo systemctl restart chronyd
    ssh node2 sudo systemctl enable chronyd
    ssh node2 sudo systemctl restart chronyd
    ssh node3 sudo systemctl enable chronyd
    ssh node3 sudo systemctl restart chronyd
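
    To confirm the sync is working, chrony can be queried on each host (a line starting with ^* marks the currently selected time source):

    chronyc sources
    ssh node1 chronyc sources
    ssh node2 chronyc sources
    ssh node3 chronyc sources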
Install Ceph
  • install ceph-deploy

    yum install -y https://mirrors.aliyun.com/ceph/rpm-nautilus/el7/noarch/ceph-deploy-2.0.1-0.noarch.rpm
  • install Ceph on all nodes (a version check follows the command)

    ceph-deploy install admin node1 node2 node3
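
    Once the install finishes, it is worth verifying that every node ended up with the same Nautilus build, for example:

    ceph --version
    for host in node1 node2 node3; do ssh $host ceph --version; done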
  • create the cluster and the initial monitors (a note on the public network follows the commands)

    ceph-deploy new node1 node2 node3
    ceph-deploy mon create-initial
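
    ceph-deploy new writes ceph.conf into the working directory; on hosts with more than one network it is common to pin the public network there before running mon create-initial. A minimal sketch, assuming the 192.168.1.0/24 subnet from the Prepare section:

    echo "public network = 192.168.1.0/24" >> ceph.conf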
  • create mgr daemons

    ceph-deploy mgr create node1 node2 node3
  • create OSDs

    ceph-deploy osd create --data /dev/sdb node1
    ceph-deploy osd create --data /dev/sdb node2
    ceph-deploy osd create --data /dev/sdb node3
  • enable dashboard

    # since Nautilus the dashboard ships as a separate package; install it on every mgr node
    yum install -y ceph-mgr-dashboard
    # if the install fails because a pip-installed urllib3 conflicts with the RPM, swap it for the distro package
    pip uninstall -y urllib3
    yum install -y python-urllib3

    # enable the dashboard module
    ceph mgr module enable dashboard --force

    # enable ssl/tls with a self-signed certificate
    ceph dashboard create-self-signed-cert

    # create admin user
    ceph dashboard ac-user-create admin admin administrator

    # view ceph-mgr service
    ceph mgr services
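
    ceph mgr services prints the dashboard URL of the active mgr. To bind the dashboard to all interfaces on a fixed port, the mgr/dashboard options can be set; a sketch (8443 is the default SSL port):

    ceph config set mgr mgr/dashboard/server_addr 0.0.0.0
    ceph config set mgr mgr/dashboard/server_port 8443
    # disable and re-enable the module so the new binding takes effect
    ceph mgr module disable dashboard
    ceph mgr module enable dashboard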
  • push the config to all nodes and copy the admin keyring to the admin node (a quick functional check follows the output)

    # only push the config file
    [root@admin ~]# ceph-deploy --overwrite-conf config push node1 node2 node3

    # push config and client.admin key
    [root@admin ~]# ceph-deploy admin admin

    [ceph_deploy.conf][DEBUG ] found configuration file at: /root/.cephdeploy.conf
    [ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy admin admin
    [ceph_deploy.cli][INFO ] ceph-deploy options:
    [ceph_deploy.cli][INFO ] username : None
    [ceph_deploy.cli][INFO ] verbose : False
    [ceph_deploy.cli][INFO ] overwrite_conf : False
    [ceph_deploy.cli][INFO ] quiet : False
    [ceph_deploy.cli][INFO ] cd_conf : <ceph_deploy.conf.cephdeploy.Conf instance at 0x7f21349cad40>
    [ceph_deploy.cli][INFO ] cluster : ceph
    [ceph_deploy.cli][INFO ] client : ['admin']
    [ceph_deploy.cli][INFO ] func : <function admin at 0x7f21356e01b8>
    [ceph_deploy.cli][INFO ] ceph_conf : None
    [ceph_deploy.cli][INFO ] default_release : False
    [ceph_deploy.admin][DEBUG ] Pushing admin keys and conf to admin
    [admin][DEBUG ] connected to host: admin
    [admin][DEBUG ] detect platform information from remote host
    [admin][DEBUG ] detect machine type
    [admin][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf

    [root@admin ~]# ceph -s
    cluster:
    id: 376a41f4-2aa1-4f96-9bcb-700f2787ebd8
    health: HEALTH_OK

    [root@admin ~]# ceph osd tree
    ID CLASS WEIGHT  TYPE NAME      STATUS REWEIGHT PRI-AFF
    -1       0.29008 root default
    -3       0.09669     host node1
     0   hdd 0.09669         osd.0      up  1.00000 1.00000
    -5       0.09669     host node2
     1   hdd 0.09669         osd.1      up  1.00000 1.00000
    -7       0.09669     host node3
     2   hdd 0.09669         osd.2      up  1.00000 1.00000
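
    With HEALTH_OK and all three OSDs up, a quick way to exercise the cluster is to write an object into a test pool; a sketch (pool name and PG count are arbitrary):

    ceph osd pool create test 64
    ceph osd pool application enable test rbd
    rados -p test put hosts-copy /etc/hosts
    rados -p test ls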
How to clean up if you want to reinstall
[root@admin ~]#  ceph-deploy purge admin node1 node2 node3
[root@admin ~]# ceph-deploy purgedata admin node1 node2 node3
[root@admin ~]# ceph-deploy forgetkeys
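
purge and purgedata remove the packages and /var/lib/ceph, but disks used for OSDs still carry LVM metadata written by ceph-volume; if they will be reused, zap them as well. A sketch assuming the same /dev/sdb layout:

[root@admin ~]# ceph-deploy disk zap node1 /dev/sdb
[root@admin ~]# ceph-deploy disk zap node2 /dev/sdb
[root@admin ~]# ceph-deploy disk zap node3 /dev/sdb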
