mkdir ~/ceph-admin
cd ~/ceph-admin
apt install ceph-deploy ceph-common
ssh-keygen
ssh-copy-id kub01
ssh-copy-id kub02
ssh-copy-id kub03
ceph-deploy new kub01 kub02 kub03
ceph-deploy install kub01 kub02 kub03
ceph-deploy mon create-initial
ceph-deploy osd prepare kub01:sda2 kub02:sda2 kub03:sda2
ceph-deploy osd activate kub01:sda2 kub02:sda2 kub03:sda2
ceph -s
    cluster 363a4cd8-4cb3-4955-96b2-73da72b63cf5
     health HEALTH_OK
ceph -s
ceph df
ceph osd tree
ceph osd pool create kube 100 100
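The two trailing numbers set the pool's pg_num and pgp_num (placement group counts). They can be read back to confirm the pool was created as intended; a quick check, not part of the original walkthrough:

ceph osd pool get kube pg_num
ceph osd pool get kube pgp_num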
ceph auth get-or-create client.kube mon 'allow r' osd 'allow rwx pool=kube'
ceph auth get-key client.admin > /etc/ceph/client.admin
ceph auth get-key client.kube > /etc/ceph/client.kube
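Before wiring these keys into Kubernetes, it is worth verifying that client.kube received the intended capabilities; an extra check of my own:

ceph auth get client.kube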
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -
cat <<EOF >/etc/apt/sources.list.d/kubernetes.list
deb http://apt.kubernetes.io/ kubernetes-xenial main
EOF
apt update
apt install -y docker.io kubelet kubeadm kubernetes-cni
kubeadm init --pod-network-cidr=10.244.0.0/16
useradd -s /bin/bash -m kube
mkdir ~kube/.kube
cp /etc/kubernetes/admin.conf ~kube/.kube/config
chown kube: ~kube/.kube/config
su - kube
kubectl taint nodes --all node-role.kubernetes.io/master-
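To confirm the taint was removed and the master will now accept regular workloads, a quick check (not in the original):

kubectl describe nodes | grep -i taints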
kubectl create clusterrolebinding add-on-cluster-admin --clusterrole=cluster-admin --serviceaccount=kube-system:default
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
kubectl -n kube-system get pods
NAME                                       READY     STATUS    RESTARTS   AGE
etcd-kub01.domain.com                      1/1       Running   1          4d
kube-apiserver-kub01.domain.com            1/1       Running   1          4d
kube-controller-manager-kub01.domain.com   1/1       Running   0          4d
kube-dns-7c6d8859cb-dmqrn                  3/3       Running   0          1d
kube-flannel-ds-j948h                      1/1       Running   0          1d
kube-proxy-rmbqq                           1/1       Running   0          1d
kube-scheduler-kub01.domain.com            1/1       Running   1          4d
kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/master/src/deploy/recommended/kubernetes-dashboard.yaml
cat << EOF > account.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kube-system
EOF
kubectl -n kube-system create -f account.yaml
kubectl proxy &
ssh -L 8001:127.0.0.1:8001 -N kub01 &
kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')
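With the proxy running and the tunnel up, the dashboard should be reachable through the API server proxy path; for the recommended deployment of this era the URL is the following (taken from the upstream dashboard docs, not stated in the original). Log in with the token printed by the command above.

http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/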
mkdir docker
# Quote the delimiter so the shell does not expand ${...} while writing the Dockerfile.
cat << 'EOF' > docker/Dockerfile
FROM ubuntu:16.04
ARG KUBERNETES_VERSION=v1.9.2

ENV DEBIAN_FRONTEND=noninteractive \
    container=docker \
    KUBERNETES_DOWNLOAD_ROOT=https://storage.googleapis.com/kubernetes-release/release/${KUBERNETES_VERSION}/bin/linux/amd64 \
    KUBERNETES_COMPONENT=kube-controller-manager

RUN set -x \
    && apt-get update \
    && apt-get install -y \
        ceph-common \
        curl \
    && curl -L ${KUBERNETES_DOWNLOAD_ROOT}/${KUBERNETES_COMPONENT} -o /usr/bin/${KUBERNETES_COMPONENT} \
    && chmod +x /usr/bin/${KUBERNETES_COMPONENT} \
    && apt-get purge -y --auto-remove \
        curl \
    && rm -rf /var/lib/apt/lists/*
EOF
docker build -t "my-kube-controller-manager:v1.9.2" docker/
docker images | grep my-kube-controller-manager
docker run my-kube-controller-manager:v1.9.2 whereis rbd
kubectl -n kube-system describe pods | grep kube-controller
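The listing above shows which image the controller manager currently runs; the swap itself is not shown here. On a kubeadm cluster the controller manager is a static pod, so one way to point it at the custom image (a sketch of mine, assuming kubeadm's default manifest path) is to edit the manifest and let the kubelet recreate the pod:

# Assumption: kubeadm keeps the static pod manifest at this path.
sed -i 's|image: .*|image: my-kube-controller-manager:v1.9.2|' \
    /etc/kubernetes/manifests/kube-controller-manager.yaml
# The kubelet picks up the change automatically; re-run the describe above to confirm.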
kubectl create secret generic ceph-secret --type="kubernetes.io/rbd" --from-file=/etc/ceph/client.admin --namespace=kube-system
kubectl create secret generic ceph-secret-kube --type="kubernetes.io/rbd" --from-file=/etc/ceph/client.kube --namespace=default
cat << EOF > ceph_storage.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: ceph-rbd
  annotations: {"storageclass.kubernetes.io/is-default-class":"true"}
provisioner: kubernetes.io/rbd
parameters:
  monitors: kub01:6789,kub02:6789,kub03:6789
  pool: kube
  adminId: admin
  adminSecretName: ceph-secret
  adminSecretNamespace: "kube-system"
  userId: kube
  userSecretName: ceph-secret-kube
  fsType: ext4
  imageFormat: "2"
  imageFeatures: "layering"
EOF
kubectl create -f ceph_storage.yaml
kube@kub01:~$ kubectl get storageclass
NAME                 PROVISIONER         AGE
ceph-rbd (default)   kubernetes.io/rbd   4d
cat << EOF > test_pod.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: claim1
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
---
apiVersion: v1
kind: Pod
metadata:
  name: test-pod-with-pvc
spec:
  volumes:
  - name: test-pvc-storage
    persistentVolumeClaim:
      claimName: claim1
  containers:
  - name: test-container
    image: kubernetes/pause
    volumeMounts:
    - name: test-pvc-storage
      mountPath: /var/lib/www/html
EOF
kubectl create -f test_pod.yaml
kube@kub01:~$ kubectl get pods
NAME                READY     STATUS    RESTARTS   AGE
test-pod-with-pvc   1/1       Running   0          15m
kube@kub01:~$ kubectl get pvc
NAME      STATUS    VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
claim1    Bound     pvc-076df6ee-0ce9-11e8-8b93-901b0e8fc39b   1Gi        RWO            ceph-rbd       12m
kube@kub01:~$ kubectl get pv
NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS    CLAIM            STORAGECLASS   REASON    AGE
pvc-076df6ee-0ce9-11e8-8b93-901b0e8fc39b   1Gi        RWO            Delete           Bound     default/claim1   ceph-rbd
root@kub01:~$ mount | grep pvc-076df6ee-0ce9-11e8-8b93-901b0e8fc39b
/dev/rbd0 on /var/lib/kubelet/pods/076fff13-0ce9-11e8-8b93-901b0e8fc39b/volumes/kubernetes.io~rbd/pvc-076df6ee-0ce9-11e8-8b93-901b0e8fc39b type ext4 (rw,relatime,stripe=1024,data=ordered)
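The same volume can be cross-checked on the Ceph side; listing the RBD images in the pool should show one image per provisioned PV. This check is my addition, not from the original, and the image name passed to rbd info is whatever the listing returns:

rbd ls -p kube
rbd info -p kube <image-from-listing>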
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -
cat <<EOF >/etc/apt/sources.list.d/kubernetes.list
deb http://apt.kubernetes.io/ kubernetes-xenial main
EOF
apt update
apt install -y docker.io kubelet kubeadm kubernetes-cni ceph-common python
kubeadm token list
kubeadm token create --print-join-command
kubeadm join --token cb9141.6a912d1dd7f66ff5 8.8.8.8:6443 --discovery-token-ca-cert-hash sha256:f0ec6d8f9699169089c89112e0e6b5905b4e1b42db22815186240777970dc6fd
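Once the join finishes, the new worker should register with the control plane; a quick check from the master (my addition, not in the original):

kubectl get nodes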
curl https://storage.googleapis.com/kubernetes-helm/helm-v2.8.0-linux-amd64.tar.gz | tar -xz
./linux-amd64/helm init
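For Helm v2, helm init deploys the Tiller component into kube-system; a quick sanity check, assuming defaults (not part of the original):

kubectl -n kube-system get pods | grep tiller
helm version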
Source: https://habr.com/ru/post/348688/