$ yum install etcd-3.1.9
ETCD_NAME=c00test01
ETCD_DATA_DIR=/var/lib/etcd

#[cluster]
ETCD_INITIAL_ADVERTISE_PEER_URLS=https://10.10.12.101:2380
ETCD_INITIAL_CLUSTER=c00test01=https://10.10.12.101:2380
ETCD_INITIAL_CLUSTER_STATE=new
ETCD_INITIAL_CLUSTER_TOKEN=etcd-k8-cluster
ETCD_LISTEN_PEER_URLS=https://0.0.0.0:2380
ETCD_ADVERTISE_CLIENT_URLS=https://10.10.12.101:2379
ETCD_LISTEN_CLIENT_URLS=https://0.0.0.0:2379

#[proxy]
ETCD_PROXY="off"

#[security]
ETCD_CA_FILE=/etc/etcd/certs/ca.crt
ETCD_TRUSTED_CA_FILE=/etc/etcd/certs/ca.crt
ETCD_CERT_FILE=/etc/etcd/certs/server.crt
ETCD_KEY_FILE=/etc/etcd/certs/server.key
ETCD_CLIENT_CERT_AUTH=true
ETCD_PEER_CA_FILE=/etc/etcd/certs/ca.crt
ETCD_PEER_TRUSTED_CA_FILE=/etc/etcd/certs/ca.crt
ETCD_PEER_CERT_FILE=/etc/etcd/certs/peer.crt
ETCD_PEER_KEY_FILE=/etc/etcd/certs/peer.key
ETCD_PEER_CLIENT_CERT_AUTH=true
$ mkdir /usr/lib/systemd/system/etcd.service.d
[Service]
ExecStart=
ExecStart=/usr/bin/etcd
$ chmod -R 644 /usr/lib/systemd/system/etcd.service.d
$ chown -R root:root /usr/lib/systemd/system/etcd.service.d
$ systemctl daemon-reload
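To confirm systemd picked up the override, `systemctl cat etcd` prints the unit file together with any drop-ins, so the empty-then-set ExecStart trick should be visible at the bottom of the output:

$ systemctl cat etcd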
$ mkdir /tmp/easyrsa
$ cd /tmp/easyrsa
$ curl -sSL -O https://storage.googleapis.com/kubernetes-release/easy-rsa/easy-rsa.tar.gz
$ tar xzf easy-rsa.tar.gz
$ cd easy-rsa-master/easyrsa3
$ ./easyrsa --batch init-pki

# create the CA
$ ./easyrsa --batch --req-cn="10.10.12.101" build-ca nopass

# server certificate for etcd
$ ./easyrsa --batch --subject-alt-name="IP:10.10.12.101,DNS:c00test01" build-server-full server nopass

# peer certificate for etcd
$ ./easyrsa --batch --subject-alt-name="IP:10.10.12.101,DNS:c00test01" build-server-full peer nopass

# client certificate for kube-apiserver and flannel
$ ./easyrsa --batch build-client-full client nopass
$ mkdir /etc/etcd/certs
$ cp -p pki/ca.crt /etc/etcd/certs/ca.crt
$ cp -p pki/issued/* /etc/etcd/certs/
$ cp -p pki/private/* /etc/etcd/certs/
$ chmod -R 440 /etc/etcd/certs/

# remove the generated private keys from the temporary directory
$ rm -rf /tmp/easyrsa/easy-rsa-master/easyrsa3/pki/
$ systemctl enable etcd && systemctl start etcd
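If everything is wired correctly, etcd should now report a healthy single-member cluster. A quick probe with the v2 etcdctl, using the client certificate generated above:

$ etcdctl --ca-file /etc/etcd/certs/ca.crt \
          --cert-file /etc/etcd/certs/client.crt \
          --key-file /etc/etcd/certs/client.key \
          --endpoints https://10.10.12.101:2379 \
          cluster-health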
{ "Network": "172.96.0.0/12", "SubnetLen": 24, "Backend": { "Type": "vxlan" } }
# replace 'cluster.lan' with your cluster name
$ /usr/bin/etcdctl --cert-file=/etc/flanneld/certs/client.crt \
    --key-file=/etc/flanneld/certs/client.key \
    --ca-file=/etc/flanneld/certs/ca.crt \
    --no-sync --peers=https://10.10.12.101:2379 \
    set /cluster.lan/network/config < /tmp/flannel-conf.json
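To make sure the key landed where flannel will look for it, it can be read back with the same credentials (a sanity check, not strictly part of the flow):

$ /usr/bin/etcdctl --cert-file=/etc/flanneld/certs/client.crt \
    --key-file=/etc/flanneld/certs/client.key \
    --ca-file=/etc/flanneld/certs/ca.crt \
    --peers=https://10.10.12.101:2379 \
    get /cluster.lan/network/config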
$ yum install flannel-0.7.1
# replace 'cluster.lan' with your cluster name
FLANNEL_ETCD="https://c00test01:2379"
FLANNEL_ETCD_ENDPOINTS="https://c00test01:2379"
FLANNEL_ETCD_KEY="/cluster.lan/network"
FLANNEL_ETCD_PREFIX="/cluster.lan/network"
FLANNEL_ETCD_CAFILE="/etc/flanneld/certs/ca.crt"
FLANNEL_ETCD_CERTFILE="/etc/flanneld/certs/client.crt"
FLANNEL_ETCD_KEYFILE="/etc/flanneld/certs/client.key"
FLANNEL_OPTIONS="-etcd-cafile /etc/flanneld/certs/ca.crt -etcd-certfile /etc/flanneld/certs/client.crt -etcd-keyfile /etc/flanneld/certs/client.key"
$ mkdir -p /etc/flanneld/certs
$ cp /etc/etcd/certs/ca.crt /etc/flanneld/certs/ca.crt
$ cp /etc/etcd/certs/client.crt /etc/flanneld/certs/client.crt
$ cp /etc/etcd/certs/client.key /etc/flanneld/certs/client.key
$ chmod -R 440 /etc/flanneld/certs/
$ systemctl enable flanneld && systemctl start flanneld
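Once flanneld acquires a lease it writes the allocated subnet to /run/flannel/subnet.env and, with the vxlan backend, should bring up a flannel.1 interface; both are worth checking before moving on:

$ cat /run/flannel/subnet.env
$ ip -4 addr show flannel.1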
$ yum install docker-1.12.6
$ systemctl enable docker && systemctl start docker
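The Fedora/CentOS flannel packaging wires docker to flannel through a systemd drop-in that sources flannel's environment file; if that is in place, docker0 should come up inside the flannel subnet (if not, restart docker after flanneld):

$ ip -4 addr show docker0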
$ mkdir /tmp/k8s
$ cd /tmp/k8s
$ rpms=(kubernetes-master kubernetes-client kubernetes-node)
$ for i in ${rpms[*]}; do wget https://kojipkgs.fedoraproject.org/packages/kubernetes/1.8.1/1.fc28/x86_64/${i}-1.8.1-1.fc28.x86_64.rpm; done
$ yum install kubernetes-master-1.8.1-1.fc28.x86_64.rpm kubernetes-client-1.8.1-1.fc28.x86_64.rpm kubernetes-node-1.8.1-1.fc28.x86_64.rpm
$ cd /tmp/easyrsa/easy-rsa-master/easyrsa3
$ ./easyrsa --batch init-pki

# create the CA
$ ./easyrsa --batch --req-cn="10.10.12.101" build-ca nopass

# server certificate for the apiserver; the alt-names must cover the cluster service IP
# and DNS names as well as the master's IP and hostname
$ ./easyrsa --batch --subject-alt-name="IP:172.30.0.1,DNS:kubernetes,DNS:kubernetes.default,DNS:kubernetes.default.svc,DNS:kubernetes.default.svc.cluster.lan,IP:10.10.12.101,DNS:c00test01" build-server-full server nopass

# client certificate the apiserver uses to talk to kubelets; the alt-names must list the node
# hostnames, otherwise 'kubectl log' and 'kubectl exec' fail with
# 'certificate signed by unknown authority'
$ ./easyrsa --batch --subject-alt-name="DNS:c00test01,DNS:c00test02" build-server-full apiserver-kubelet-client nopass

# client certificates for kubelet and kubectl
$ ./easyrsa --batch build-client-full kubelet nopass
$ ./easyrsa --batch build-client-full kubectl nopass
$ mkdir /etc/kubernetes/certs
$ cp -p pki/ca.crt /etc/kubernetes/certs/ca.crt
$ cp -p pki/issued/* /etc/kubernetes/certs/
$ cp -p pki/private/* /etc/kubernetes/certs/
$ chown -R kube:kube /etc/kubernetes/certs/
$ chmod -R 440 /etc/kubernetes/certs/

# remove the generated private keys from the temporary directory
$ rm -rf /tmp/easyrsa/easy-rsa-master/easyrsa3/pki/
$ mkdir /etc/kubernetes/certs/etcd
$ cd /etc/etcd/certs
$ cp ca.crt /etc/kubernetes/certs/etcd/ca.crt
$ cp client.crt /etc/kubernetes/certs/etcd/client.crt
$ cp client.key /etc/kubernetes/certs/etcd/client.key
$ rm /etc/kubernetes/config
KUBE_API_ADDRESS="--bind-address=0.0.0.0"
KUBE_API_PORT="--secure-port=6443"
# KUBELET_PORT="--kubelet-port=10250"
KUBE_ETCD_SERVERS="--etcd-servers=https://c00test01:2379 \
  --etcd-cafile=/etc/kubernetes/certs/etcd/ca.crt \
  --etcd-certfile=/etc/kubernetes/certs/etcd/client.crt \
  --etcd-keyfile=/etc/kubernetes/certs/etcd/client.key"
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=172.30.0.0/16"
KUBE_ADMISSION_CONTROL="--admission-control=Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota"
KUBE_API_ARGS="--tls-cert-file=/etc/kubernetes/certs/server.crt \
  --tls-private-key-file=/etc/kubernetes/certs/server.key \
  --tls-ca-file=/etc/kubernetes/certs/ca.crt \
  --client-ca-file=/etc/kubernetes/certs/ca.crt \
  --kubelet-certificate-authority=/etc/kubernetes/certs/ca.crt \
  --kubelet-client-certificate=/etc/kubernetes/certs/apiserver-kubelet-client.crt \
  --kubelet-client-key=/etc/kubernetes/certs/apiserver-kubelet-client.key \
  --token-auth-file=/etc/kubernetes/tokens/known_tokens.csv \
  --service-account-key-file=/etc/kubernetes/certs/server.crt \
  --bind-address=0.0.0.0 \
  --insecure-port=0 \
  --apiserver-count=1 \
  --basic-auth-file=/etc/kubernetes/certs/basic.cnf \
  --anonymous-auth=false \
  --allow-privileged=true"
$ mkdir /etc/kubernetes/tokens
$ touch /etc/kubernetes/tokens/known_tokens.csv
# allow kube-apiserver, which runs as the unprivileged 'kube' user, to bind to privileged ports
$ setcap cap_net_bind_service=ep /usr/bin/kube-apiserver
$ touch /etc/kubernetes/certs/basic.cnf
admin,password,001
deploy,deploy,002
#!/bin/bash
accounts=(system:controller_manager system:scheduler system:kubectl system:dns system:kubelet system:proxy)
for account in ${accounts[*]}; do
  # 32 random base64 characters per account, stored both in the shared CSV
  # and in a per-account token file
  token=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
  echo "${token},${account},${account}" >> "/etc/kubernetes/tokens/known_tokens.csv"
  echo "${token}" > "/etc/kubernetes/tokens/${account}.token"
done
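Each line the script appends follows the apiserver token-auth-file format, token,user,uid. Assuming the script was saved as /root/gen-tokens.sh (the name and location are arbitrary), it can be run and the accounts listed afterwards without printing the tokens themselves:

$ bash /root/gen-tokens.sh
$ cut -d, -f2,3 /etc/kubernetes/tokens/known_tokens.csv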
$ systemctl enable kube-apiserver && systemctl start kube-apiserver
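A direct probe over the secure port with the kubectl client certificate confirms that TLS and client-certificate auth both work, without needing a kubeconfig yet:

$ kubectl --server=https://c00test01:6443 \
    --certificate-authority=/etc/kubernetes/certs/ca.crt \
    --client-certificate=/etc/kubernetes/certs/kubectl.crt \
    --client-key=/etc/kubernetes/certs/kubectl.key \
    version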
KUBE_CONTROLLER_MANAGER_ARGS="--kubeconfig=/etc/kubernetes/controller-manager.kubeconfig \
  --service-account-private-key-file=/etc/kubernetes/certs/server.key \
  --root-ca-file=/etc/kubernetes/certs/ca.crt"
# replace 'cluster.lan' with your cluster name
apiVersion: v1
kind: Config
current-context: controller-manager-to-cluster.lan
preferences: {}
clusters:
- cluster:
    certificate-authority: /etc/kubernetes/certs/ca.crt
    server: https://c00test01:6443
  name: cluster.lan
contexts:
- context:
    cluster: cluster.lan
    user: controller-manager
  name: controller-manager-to-cluster.lan
users:
- name: controller-manager
  user:
    # the token generated in /etc/kubernetes/tokens/system:controller_manager.token
    token: cW6ha9WHzTK9Y4psT9pMKcUqfr673ydF
$ systemctl enable kube-controller-manager && systemctl start kube-controller-manager
KUBE_SCHEDULER_ARGS="--kubeconfig=/etc/kubernetes/scheduler.kubeconfig"
# replace 'cluster.lan' with your cluster name
apiVersion: v1
kind: Config
current-context: scheduler-to-cluster.lan
preferences: {}
clusters:
- cluster:
    certificate-authority: /etc/kubernetes/certs/ca.crt
    server: https://c00test01:6443
  name: cluster.lan
contexts:
- context:
    cluster: cluster.lan
    user: scheduler
  name: scheduler-to-cluster.lan
users:
- name: scheduler
  user:
    # the token generated in /etc/kubernetes/tokens/system:scheduler.token
    token: A2cU20Q9MkzdK8ON6UnVaP1nusWNKrWT
$ systemctl enable kube-scheduler && systemctl start kube-scheduler
# replace 'cluster.lan' with your cluster name
apiVersion: v1
kind: Config
current-context: kubectl-to-cluster.lan
preferences: {}
clusters:
- cluster:
    certificate-authority: /etc/kubernetes/certs/ca.crt
    server: https://c00test01:6443
  name: cluster.lan
contexts:
- context:
    cluster: cluster.lan
    user: kubectl
  name: kubectl-to-cluster.lan
users:
- name: kubectl
  user:
    client-certificate: /etc/kubernetes/certs/kubectl.crt
    client-key: /etc/kubernetes/certs/kubectl.key
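Assuming this config is saved as ~/.kube/config (or passed explicitly via --kubeconfig), the health of the control-plane components started so far can be checked in one go:

$ kubectl get componentstatuses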
$ cat <<EOF | kubectl create -f -
apiVersion: v1
kind: ReplicationController
metadata:
  name: kube-dns-v20
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    version: v20
    kubernetes.io/cluster-service: "true"
spec:
  replicas: 1
  selector:
    k8s-app: kube-dns
    version: v20
  template:
    metadata:
      labels:
        k8s-app: kube-dns
        version: v20
    spec:
      # pin the kube-dns pod to the master node
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: kubernetes.io/hostname
                operator: In
                values:
                - c00test01
      containers:
      - name: kubedns
        image: gcr.io/google_containers/kubedns-amd64:1.8
        resources:
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        livenessProbe:
          httpGet:
            path: /healthz-kubedns
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /readiness
            port: 8081
            scheme: HTTP
          initialDelaySeconds: 3
          timeoutSeconds: 5
        args:
        # command = "/kube-dns"
        # replace 'cluster.lan' with your cluster name
        - --domain=cluster.lan.
        - --dns-port=10053
        ports:
        - containerPort: 10053
          name: dns-local
          protocol: UDP
        - containerPort: 10053
          name: dns-tcp-local
          protocol: TCP
      - name: dnsmasq
        image: gcr.io/google_containers/kube-dnsmasq-amd64:1.4
        livenessProbe:
          httpGet:
            path: /healthz-dnsmasq
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        args:
        - --cache-size=1000
        - --no-resolv
        - --server=127.0.0.1#10053
        - --log-facility=-
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
      - name: healthz
        image: gcr.io/google_containers/exechealthz-amd64:1.2
        resources:
          limits:
            memory: 50Mi
          requests:
            cpu: 10m
            memory: 50Mi
        args:
        # replace 'cluster.lan' with your cluster name
        - --cmd=nslookup kubernetes.default.svc.cluster.lan 127.0.0.1 >/dev/null
        - --url=/healthz-dnsmasq
        - --cmd=nslookup kubernetes.default.svc.cluster.lan 127.0.0.1:10053 >/dev/null
        - --url=/healthz-kubedns
        - --port=8080
        - --quiet
        ports:
        - containerPort: 8080
          protocol: TCP
      dnsPolicy: Default
      tolerations:
      - effect: NoSchedule
        key: node-role.kubernetes.io/master
      - key: CriticalAddonsOnly
        operator: Exists
EOF
$ cat <<EOF | kubectl create -f -
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "KubeDNS"
spec:
  selector:
    k8s-app: kube-dns
  # the 10th address of the range passed to the apiserver
  # via '--service-cluster-ip-range'
  clusterIP: 172.30.0.10
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
EOF
$ mkdir /tmp/k8s
$ cd /tmp/k8s
$ rpms=(kubernetes-client kubernetes-node)
$ for i in ${rpms[*]}; do wget https://kojipkgs.fedoraproject.org/packages/kubernetes/1.8.1/1.fc28/x86_64/${i}-1.8.1-1.fc28.x86_64.rpm; done
$ yum install kubernetes-client-1.8.1-1.fc28.x86_64.rpm kubernetes-node-1.8.1-1.fc28.x86_64.rpm
KUBELET_ADDRESS="--address=0.0.0.0"
# KUBELET_PORT="--port=10250"
KUBELET_HOSTNAME="--hostname-override=c00test02"
KUBELET_ARGS="--register-node=true \
  --tls-cert-file=/etc/kubernetes/certs/apiserver-kubelet-client.crt \
  --tls-private-key-file=/etc/kubernetes/certs/apiserver-kubelet-client.key \
  --require-kubeconfig=true \
  --kubeconfig=/etc/kubernetes/kubelet.kubeconfig \
  --pod-manifest-path=/etc/kubernetes/manifests \
  --cgroup-driver=systemd \
  --allow-privileged=true \
  --cluster-domain=cluster.lan \
  --authorization-mode=Webhook \
  --fail-swap-on=false \
  --cluster-dns=172.30.0.10"
# -w0 disables line wrapping, so the output can be pasted into
# 'certificate-authority-data' as a single line
$ base64 -w0 /etc/kubernetes/certs/ca.crt
# change 'cluster.lan' to your cluster name
apiVersion: v1
kind: Config
current-context: kubelet-to-cluster.lan
preferences: {}
clusters:
- cluster:
    certificate-authority-data: <base64-encoded ca.crt>
    server: https://c00test01:6443
  name: cluster.lan
contexts:
- context:
    cluster: cluster.lan
    user: kubelet
  name: kubelet-to-cluster.lan
users:
- name: kubelet
  user:
    client-certificate: /etc/kubernetes/certs/kubelet.crt
    client-key: /etc/kubernetes/certs/kubelet.key
# change 'cluster.lan' to your cluster name
apiVersion: v1
kind: Config
current-context: proxy-to-cluster.lan
preferences: {}
contexts:
- context:
    cluster: cluster.lan
    user: proxy
  name: proxy-to-cluster.lan
clusters:
- cluster:
    certificate-authority-data: <base64-encoded ca.crt>
    server: https://c00test01:6443
  name: cluster.lan
users:
- name: proxy
  user:
    client-certificate: /etc/kubernetes/certs/kubelet.crt
    client-key: /etc/kubernetes/certs/kubelet.key
$ systemctl enable kubelet && systemctl enable kube-proxy
$ systemctl start kubelet && systemctl start kube-proxy
$ kubectl get nodes
NAME        STATUS    ROLES     AGE       VERSION
c00test01   Ready     master    5m        v1.8.1
c00test02   Ready     <none>    4m        v1.8.1
$ kubectl get pods --all-namespaces
NAMESPACE     NAME                 READY     STATUS    RESTARTS   AGE
kube-system   kube-dns-v20-2jqsj   3/3       Running   0          3m
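As a final smoke test, in-cluster DNS can be exercised with a throwaway pod (replace cluster.lan with your cluster name):

$ kubectl run -i -t --rm busybox --image=busybox --restart=Never -- \
    nslookup kubernetes.default.svc.cluster.lan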
Source: https://habr.com/ru/post/342232/