Ceph Cluster Deployment v14.2.16

I. Pre-installation Preparation

1. Installation Requirements

Deploy the Ceph cluster on at least three CentOS virtual machines. Each machine also needs at least three extra disks attached (50 GB minimum per disk; the setup below uses 1024 GB disks).

Hostname   IP              Role                 Disks
ceph01     192.168.10.40   Ceph + ceph-deploy   /dev/sda (system); /dev/sdb, /dev/sdc, /dev/sdd (data, 1024G each)
ceph02     192.168.10.41   Ceph                 /dev/sda (system); /dev/sdb, /dev/sdc, /dev/sdd (data, 1024G each)
ceph03     192.168.10.42   Ceph                 /dev/sda (system); /dev/sdb, /dev/sdc, /dev/sdd (data, 1024G each)

Check the disk status on each node:

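A quick way to check is lsblk; the output below is purely illustrative, assuming a 50G system disk plus three unused 1T data disks per node:

lsblk
# NAME   MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
# sda      8:0    0   50G  0 disk
# ├─sda1   8:1    0    1G  0 part /boot
# └─sda2   8:2    0   49G  0 part /
# sdb      8:16   0    1T  0 disk
# sdc      8:32   0    1T  0 disk
# sdd      8:48   0    1T  0 disk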

2. Environment Preparation (run on all three Ceph nodes)

#(1) Disable the firewall:
systemctl stop firewalld && systemctl disable firewalld

#(2) Disable SELinux (immediately, and permanently across reboots):
setenforce 0
sed -i 's/^SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config

#(3) On ceph01, set up passwordless SSH to ceph02 and ceph03:
ssh-keygen (press Enter through all prompts)
ssh-copy-id [node IP]

#(4) Set the hostnames:
hostnamectl set-hostname ceph01
hostnamectl set-hostname ceph02
hostnamectl set-hostname ceph03

#(5) On ceph01, add the hostname-to-IP mappings, then push the file to the other two nodes:
echo '192.168.10.40 ceph01
192.168.10.41 ceph02
192.168.10.42 ceph03' >> /etc/hosts
for ip in 41 42;do scp -rp /etc/hosts 192.168.10.$ip:/etc/hosts ;done
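A quick sanity check that both the hosts entries and the passwordless SSH work: each command below should print the remote hostname without prompting for a password.

for host in ceph02 ceph03; do ssh $host hostname; done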


#(6) Raise the open file descriptor limits:
echo "ulimit -SHn 102400" >> /etc/rc.local
cat >> /etc/security/limits.conf << EOF
* soft nofile 65535
* hard nofile 65535
EOF

#(7) Tune kernel parameters:
echo 'net.ipv4.ip_forward = 1' >>/etc/sysctl.conf
echo 'kernel.pid_max = 4194303' >>/etc/sysctl.conf
echo "vm.swappiness = 0" >>/etc/sysctl.conf
sysctl -p
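To confirm the settings took effect, read them back (sysctl accepts several keys in one call):

sysctl net.ipv4.ip_forward kernel.pid_max vm.swappiness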

#(8) Set up network time synchronization and the time zone
#On ceph01, sync from a public NTP server and allow the other nodes to sync from ceph01:
yum install chrony -y
vim /etc/chrony.conf
server ntp1.aliyun.com iburst
allow 192.168.10.0/24
---
systemctl restart chronyd.service
systemctl enable chronyd.service
chronyc sources

#On the other nodes, install chrony as well and sync from ceph01:
yum install chrony -y
vim /etc/chrony.conf
server ceph01 iburst
---
systemctl restart chronyd.service
systemctl enable chronyd.service
chronyc sources
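To see how far a node's clock is from its selected source, chronyc tracking reports the current offset and synchronization state:

chronyc tracking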

#(9) read_ahead: improve disk read performance by prefetching data into memory:
echo "8192" > /sys/block/sda/queue/read_ahead_kb

#(10) I/O scheduler: use noop (elevator scheduler) for SSDs, deadline for SATA/SAS:
echo "deadline" >/sys/block/sda/queue/scheduler
echo "deadline" >/sys/block/sdb/queue/scheduler
echo "deadline" >/sys/block/sdc/queue/scheduler
echo "deadline" >/sys/block/sdd/queue/scheduler

#echo "noop" >/sys/block/sd[x]/queue/scheduler

#(11) Configure the yum repository
#Ceph mirror addresses:
https://mirrors.tuna.tsinghua.edu.cn/ceph/
http://mirrors.163.com/ceph

[root@ceph01 ~]# echo '[Ceph]
name=Ceph packages for $basearch
baseurl=https://mirrors.tuna.tsinghua.edu.cn/ceph/rpm-nautilus/el7/x86_64/
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=https://download.ceph.com/keys/release.asc
priority=1

[Ceph-noarch]
name=Ceph
baseurl=https://mirrors.tuna.tsinghua.edu.cn/ceph/rpm-nautilus/el7/noarch/
enabled=1
gpgcheck=1
gpgkey=https://download.ceph.com/keys/release.asc
priority=1' >/etc/yum.repos.d/ceph.repo
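Every node needs this repo file before installing the Ceph packages, so push it out with the same scp loop used for /etc/hosts:

for ip in 41 42;do scp /etc/yum.repos.d/ceph.repo 192.168.10.$ip:/etc/yum.repos.d/ ;done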

II. Installing the Ceph Cluster

1. Install ceph-deploy on ceph01

ceph-deploy is the Ceph deployment tool.

yum install -y ceph-deploy

ceph-deploy --version
2.0.1

#If the version check fails with the traceback below, fix it as follows:
[root@ceph01 ~]# ceph-deploy --version
Traceback (most recent call last):
File "/usr/bin/ceph-deploy", line 18, in <module>
from ceph_deploy.cli import main
File "/usr/lib/python2.7/site-packages/ceph_deploy/cli.py", line 1, in <module>
import pkg_resources
ImportError: No module named pkg_resources

#The cause is a missing python-setuptools; install it and retry:
yum install python-setuptools -y
ceph-deploy --version
2.0.1

2. Create a cluster working directory. !!! All subsequent ceph-deploy commands are run from this directory (its location and name are arbitrary).

mkdir /my-cluster
cd /my-cluster

3. Install the Ceph packages (run on every node)

yum install -y ceph

#Pick one of the two: the yum install above on every node,
#or this single command from the admin node:
ceph-deploy install ceph01 ceph02 ceph03
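Either way, confirm afterwards that each node reports the expected release (output shown for v14.2.16; the build hash is elided):

ceph --version
# ceph version 14.2.16 (...) nautilus (stable)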


4. Initial cluster creation (run on the admin node)

#1. Create a new cluster (an odd number of monitor nodes is recommended)
ceph-deploy new ceph01 ceph02 ceph03

#2. Deploy the initial monitors and gather the keys the cluster uses (activates the monitor nodes)
ceph-deploy mon create-initial

#3. Distribute the config file and admin keyring
ceph-deploy admin ceph01 ceph02 ceph03

#4. Create mgr daemons to manage the cluster
ceph-deploy mgr create ceph01 ceph02 ceph03

#5. Add OSDs (the disks must be raw, untouched devices; a consolidated loop is sketched after these commands)
ceph-deploy osd create --data /dev/sdb ceph01
ceph-deploy osd create --data /dev/sdc ceph01
ceph-deploy osd create --data /dev/sdd ceph01

ceph-deploy osd create --data /dev/sdb ceph02
ceph-deploy osd create --data /dev/sdc ceph02
ceph-deploy osd create --data /dev/sdd ceph02

ceph-deploy osd create --data /dev/sdb ceph03
ceph-deploy osd create --data /dev/sdc ceph03
ceph-deploy osd create --data /dev/sdd ceph03
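The nine commands above can be collapsed into an equivalent loop, which also makes it easy to add hosts or disks later:

for host in ceph01 ceph02 ceph03; do
  for dev in sdb sdc sdd; do
    ceph-deploy osd create --data /dev/$dev $host
  done
done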

#OSD removal: stop the OSD daemon on its node, then take it out of the cluster:
systemctl stop ceph-osd@0
ceph osd out osd.0
ceph osd crush rm osd.0
ceph auth del osd.0
ceph osd rm osd.0
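ceph osd tree is a quick way to verify the result: after the create step it should list all nine OSDs as up, and after a removal the deleted OSD should be gone:

ceph osd tree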

5. Check the cluster status

ceph -s gives a quick view of the cluster state:

[root@ceph01 my-cluster]# ceph -s 
  cluster:
    id:     55aec6dc-73bc-4f90-bfbc-2193e92fe230
    health: HEALTH_OK                                           //HEALTH_OK means the cluster is healthy

  services:
    mon: 3 daemons, quorum ceph01,ceph02,ceph03 (age 3m)
    mgr: ceph02(active, since 2m), standbys: ceph01, ceph03     //mgr status
    osd: 9 osds: 9 up (since 9s), 9 in (since 9s)               //OSD status

  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   9.0 GiB used, 9.0 TiB / 9.0 TiB avail
    pgs:


###Troubleshooting
[root@ceph01 my-cluster]# ceph -s
  cluster:
    id:     31bccbf2-b938-4a43-9f89-b629162bab2f
    health: HEALTH_WARN
            Module 'restful' has failed dependency: No module named 'pecan'   //the error

Fix: install the missing Python modules:
pip3 install pecan werkzeug
Reboot for the change to take effect.
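A full reboot may be heavier than necessary; restarting just the mgr daemon on each node should also pick up the newly installed modules (the systemd instance name is normally the hostname, ceph01 here):

systemctl restart ceph-mgr@ceph01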



