/dev/sda as the system disk, and /dev/sdb and /dev/sdc for the Ceph FS file system. The OS in this example is Ubuntu 12.04 LTS. One more server will mount the file system, in effect acting as a client. We use the default level of redundancy, that is, two replicas of each block.

The cluster can be built either with mkcephfs or with the newer ceph-deploy. For newer versions, starting with the 0.6x branch (cuttlefish), ceph-deploy is already the recommended tool, but in this example I use the earlier stable release from the 0.56.x branch (bobtail), so the deployment is done with mkcephfs.
First, on every node, install ntpdate (the Ceph monitors are sensitive to clock skew, so the nodes' clocks must be kept in sync) and your favorite editor, for example vim:

aptitude update && aptitude install ntpdate vim
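A minimal way to keep the clocks in sync, assuming the public pool.ntp.org servers are reachable (a full ntpd setup would work just as well), is a one-off sync plus an hourly cron job:

# sync once right now (the NTP server choice here is an assumption)
ntpdate pool.ntp.org
# resync every hour via cron
echo '0 * * * * root /usr/sbin/ntpdate -s pool.ntp.org' > /etc/cron.d/ntpdate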
Then add the Ceph release key and the bobtail repository, and install Ceph itself on all nodes:

wget -q -O- 'https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc' | sudo apt-key add -
echo deb http://ceph.com/debian-bobtail/ $(lsb_release -sc) main | tee /etc/apt/sources.list.d/ceph.list
aptitude update && aptitude install ceph
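It is worth checking that every node ended up with the same release (the exact 0.56.x point version depends on when you install):

# should print a 0.56.x (bobtail) version on every node
ceph -v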
Next, describe the future cluster in /etc/ceph/ceph.conf:

[global]
    auth cluster required = cephx
    auth service required = cephx
    auth client required = cephx

[osd]
    osd journal size = 2000
    osd mkfs type = xfs
    osd mkfs options xfs = -f -i size=2048
    osd mount options xfs = rw,noatime,inode64

[mon.a]
    host = node01
    mon addr = 192.168.2.31:6789

[mon.b]
    host = node02
    mon addr = 192.168.2.32:6789

[mon.c]
    host = node03
    mon addr = 192.168.2.33:6789

[osd.0]
    host = node01
    devs = /dev/sdb

[osd.1]
    host = node01
    devs = /dev/sdc

[osd.2]
    host = node02
    devs = /dev/sdb

[osd.3]
    host = node02
    devs = /dev/sdc

[osd.4]
    host = node03
    devs = /dev/sdb

[osd.5]
    host = node03
    devs = /dev/sdc

[mds.a]
    host = node01

and make the file readable:
chmod 644 /etc/ceph/ceph.conf
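The same ceph.conf must also end up on node02 and node03. One simple way to distribute it (an assumption here; any method of copying the file works, and this one relies on the root SSH access configured in the next step) is:

scp /etc/ceph/ceph.conf node02:/etc/ceph/ceph.conf
scp /etc/ceph/ceph.conf node03:/etc/ceph/ceph.conf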
Since mkcephfs will lay everything out across the nodes over SSH as root, set a root password and generate a key pair:

passwd root
ssh-keygen
Describe the nodes in /root/.ssh/config, adjusting the host names to your setup:

Host node01
    Hostname node01.ceph.labspace.studiogrizzly.com
    User root
Host node02
    Hostname node02.ceph.labspace.studiogrizzly.com
    User root
Host node03
    Hostname node03.ceph.labspace.studiogrizzly.com
    User root
Then copy the public key to the other two nodes:

ssh-copy-id root@node02
ssh-copy-id root@node03
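To verify that passwordless root login works (mkcephfs depends on it), each of these should print the remote host name without asking for a password:

ssh node02 hostname
ssh node03 hostname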
On every node, format the OSD disks as XFS (this wipes /dev/sdb and /dev/sdc):

mkfs -t xfs -f -i size=2048 /dev/sdb
mkfs -t xfs -f -i size=2048 /dev/sdc
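A quick sanity check that the formatting succeeded:

# each device should report TYPE="xfs"
blkid /dev/sdb /dev/sdc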
Create the OSD working directories and mount the disks, matching the osd numbers from ceph.conf. On node01:

mkdir -p /var/lib/ceph/osd/ceph-0
mkdir -p /var/lib/ceph/osd/ceph-1
mount /dev/sdb /var/lib/ceph/osd/ceph-0 -o noatime,inode64
mount /dev/sdc /var/lib/ceph/osd/ceph-1 -o noatime,inode64

On node02:

mkdir -p /var/lib/ceph/osd/ceph-2
mkdir -p /var/lib/ceph/osd/ceph-3
mount /dev/sdb /var/lib/ceph/osd/ceph-2 -o noatime,inode64
mount /dev/sdc /var/lib/ceph/osd/ceph-3 -o noatime,inode64

On node03:

mkdir -p /var/lib/ceph/osd/ceph-4
mkdir -p /var/lib/ceph/osd/ceph-5
mount /dev/sdb /var/lib/ceph/osd/ceph-4 -o noatime,inode64
mount /dev/sdc /var/lib/ceph/osd/ceph-5 -o noatime,inode64
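These mounts will not survive a reboot; to make them persistent, add them to /etc/fstab on each node (a sketch for node01, adjust the directory numbers on the other nodes):

/dev/sdb /var/lib/ceph/osd/ceph-0 xfs rw,noatime,inode64 0 0
/dev/sdc /var/lib/ceph/osd/ceph-1 xfs rw,noatime,inode64 0 0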
Now initialize the cluster from node01; the -a flag makes mkcephfs perform the setup on every host listed in the config over SSH:

mkcephfs -a -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.keyring
Copy the resulting ceph.keyring to the other nodes of the cluster:

scp /etc/ceph/ceph.keyring node02:/etc/ceph/ceph.keyring
scp /etc/ceph/ceph.keyring node03:/etc/ceph/ceph.keyring
Also copy it to the future client, which in this example has the address 192.168.2.39:

scp /etc/ceph/ceph.keyring 192.168.2.39:/etc/ceph/ceph.keyring
On each machine, open up read permissions on the keyring:

chmod 644 /etc/ceph/ceph.keyring
Start Ceph on all nodes:

service ceph -a start
and check the cluster state:

ceph -s

Once all daemons are up and the placement groups have settled, the status should report HEALTH_OK.
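Two more checks that are handy at this point: ceph health gives a one-line summary, and ceph osd tree should show all six OSDs up and in. We kept the default redundancy of two replicas, so nothing needs changing, but the replica count can be set per pool, for example:

ceph health
ceph osd tree
# optional: explicitly request two replicas for the data pool (the default here)
ceph osd pool set data size 2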
Now, on the client side, create the mount point /mnt/cephfs, extract the admin key into a secret file for the in-kernel ceph client, and mount the file system:

mkdir /mnt/cephfs
ceph-authtool --name client.admin /etc/ceph/ceph.keyring --print-key | tee /etc/ceph/admin.secret
mount -t ceph node01:6789,node02:6789,node03:6789:/ /mnt/cephfs -o name=admin,secretfile=/etc/ceph/admin.secret,noatime
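To confirm the mount works, check the reported size and write a test file (the file name is arbitrary):

df -h /mnt/cephfs
echo 'hello ceph' > /mnt/cephfs/test.txt
cat /mnt/cephfs/test.txt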
Source: https://habr.com/ru/post/179823/