Use kubeadm to build a k8s cluster


Since I want to study KubeEdge later, and KubeEdge currently supports Kubernetes only up to version 1.19, the k8s version used here is 1.19.9. The servers run on an OpenStack private cloud platform.

Node plan and network segment configuration

The six cloud servers used here run CentOS 7.6. Their IPs and the corresponding /etc/hosts entries are planned as follows:

192.168.80.10   k8s-master-lb # VIP, equivalent to ELB/SLB; for a non-HA cluster, use master01's IP instead
192.168.80.11   k8s-master01 
192.168.80.12   k8s-master02
192.168.80.13   k8s-master03
192.168.80.21   k8s-node01
192.168.80.22   k8s-node02
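
These entries need to go into /etc/hosts on every node; a minimal way to append them, assuming the IP plan above:

cat >> /etc/hosts <<EOF
192.168.80.10   k8s-master-lb
192.168.80.11   k8s-master01
192.168.80.12   k8s-master02
192.168.80.13   k8s-master03
192.168.80.21   k8s-node01
192.168.80.22   k8s-node02
EOF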

Service network segment: 10.96.0.0/12

Pod network segment: 172.168.0.0/12

Basic configuration (on one server first)

hostnamectl set-hostname k8s-master01

YUM repository configuration

curl -o /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo
yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
sed -i -e '/mirrors.cloud.aliyuncs.com/d' -e '/mirrors.aliyuncs.com/d' /etc/yum.repos.d/CentOS-Base.repo

cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
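
Optionally, rebuild the yum cache to confirm the new repositories are reachable:

yum clean all
yum makecache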

Synchronize time

yum install -y ntpdate
ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
echo 'Asia/Shanghai' >/etc/timezone
ntpdate time2.aliyun.com
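
Optionally, add a cron entry so the clock keeps syncing (a sketch; the five-minute interval is an arbitrary choice):

(crontab -l 2>/dev/null; echo "*/5 * * * * /usr/sbin/ntpdate time2.aliyun.com >/dev/null 2>&1") | crontab -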

Turn off firewall, swap, selinux, dnsmasq

# Turn off the firewall
systemctl stop firewalld && systemctl disable firewalld

# Turn off swap on all nodes, including the master nodes and worker nodes
# Remember to turn it off, otherwise kubelet will fail to start (learned the hard way)
swapoff -a
# Prevent swap from being re-enabled at boot
sed -i '/swap/s/^\(.*\)$/#\1/g' /etc/fstab

# Disable SELinux
setenforce 0
sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/sysconfig/selinux
sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/selinux/config
getenforce   # check SELinux status

# Stop dnsmasq (otherwise Docker containers may fail to resolve domain names)
systemctl stop dnsmasq && systemctl disable dnsmasq
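
A quick sanity check that swap and the firewall are really off (optional):

free -m                         # the Swap line should show 0 total
systemctl is-active firewalld   # should print "inactive"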

Install prerequisites

yum install wget jq psmisc vim net-tools telnet yum-utils device-mapper-persistent-data lvm2 git ntpdate keepalived haproxy  -y

Configure system limits, kernel modules, and sysctl parameters

vim /etc/security/limits.conf
* soft nofile 655360
* hard nofile 655360
* soft nproc 655350
* hard nproc 655350
* soft memlock unlimited
* hard memlock unlimited
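
The limits above only take effect for new login sessions; after logging in again they can be verified with:

ulimit -n   # max open files
ulimit -u   # max user processes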

vim /etc/modules-load.d/ipvs.conf
ip_vs
ip_vs_lc
ip_vs_wlc
ip_vs_rr
ip_vs_wrr
ip_vs_lblc
ip_vs_lblcr
ip_vs_dh
ip_vs_sh
ip_vs_fo
ip_vs_nq
ip_vs_sed
ip_vs_ftp
nf_conntrack
ip_tables
ip_set
xt_set
ipt_set
ipt_rpfilter
ipt_REJECT
ipip
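
The modules-load file above is only processed automatically at boot; to load the IPVS modules right away and confirm they are present, something like the following works:

systemctl restart systemd-modules-load.service
lsmod | grep -e ip_vs -e nf_conntrack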

cat <<EOF > /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
fs.may_detach_mounts = 1
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.netfilter.nf_conntrack_max=2310720

net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl =15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16384
EOF
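
Apply the new sysctl settings without rebooting; the net.bridge.* keys require the br_netfilter module to be loaded first:

modprobe br_netfilter
sysctl --system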

The stock 3.10.x kernel that ships with CentOS 7.x has bugs that make Docker and Kubernetes unstable, so upgrading the kernel is recommended; containers cause far less trouble on a newer kernel. Here the kernel is upgraded to 4.19.12.

wget http://193.49.22.109/elrepo/kernel/el7/x86_64/RPMS/kernel-ml-devel-4.19.12-1.el7.elrepo.x86_64.rpm
wget http://193.49.22.109/elrepo/kernel/el7/x86_64/RPMS/kernel-ml-4.19.12-1.el7.elrepo.x86_64.rpm
yum localinstall -y kernel-ml*

grub2-set-default  0 && grub2-mkconfig -o /etc/grub2.cfg
grubby --args="user_namespace.enable=1" --update-kernel="$(grubby --default-kernel)"

# Setup is complete, restart
reboot
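
After the machine comes back up, confirm that it booted into the new kernel:

uname -r   # should print something like 4.19.12-1.el7.elrepo.x86_64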

Install Docker-ce 19.03

yum install docker-ce-19.03.15-3.el7  docker-ce-cli-19.03.15-3.el7 -y
systemctl enable docker && systemctl restart docker
cat > /etc/docker/daemon.json <<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "registry-mirrors": ["https://******.mirror.aliyuncs.com"]
}
EOF
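
Because daemon.json is written after Docker was already started, restart Docker once more so the systemd cgroup driver and the registry mirror take effect, and verify:

systemctl restart docker
docker info | grep -i "cgroup driver"   # should show: Cgroup Driver: systemd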

Install kubeadm

# Find the version number to be installed
yum list kubeadm --showduplicates | sort -r
yum install -y kubeadm-1.19.8-0 kubelet-1.19.8-0
yum install -y kubectl-1.19.8-0

Enable kubelet to start on boot:

systemctl enable kubelet && systemctl restart kubelet

High availability: HAProxy + Keepalived

vim /etc/haproxy/haproxy.cfg
# Add the following content; adjust the IP addresses and other values according to your own IP plan.
global
  maxconn  2000
  ulimit-n  16384
  log  127.0.0.1 local0 err
  stats timeout 30s

defaults
  log global
  mode  http
  option  httplog
  timeout connect 5000
  timeout client  50000
  timeout server  50000
  timeout http-request 15s
  timeout http-keep-alive 15s

frontend monitor-in
  bind *:33305
  mode http
  option httplog
  monitor-uri /monitor

frontend k8s-master
  bind 0.0.0.0:16443
  bind 127.0.0.1:16443
  mode tcp
  option tcplog
  tcp-request inspect-delay 5s
  default_backend k8s-master

backend k8s-master
  mode tcp
  option tcplog
  option tcp-check
  balance roundrobin
  default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
  server k8s-master01   192.168.80.11:6443  check
  server k8s-master02   192.168.80.12:6443  check
  server k8s-master03   192.168.80.13:6443  check
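
Before relying on it, the configuration can be syntax-checked (optional):

haproxy -c -f /etc/haproxy/haproxy.cfg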

KeepAlived

vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
    router_id LVS_DEVEL
    script_user root
    enable_script_security
}
vrrp_script chk_apiserver {
    script "/etc/keepalived/check_apiserver.sh"
    interval 5
    weight -5
    fall 2
    rise 1
}
vrrp_instance VI_1 {
    state MASTER
    interface eth0 # change this to the NIC that carries the local LAN IP (check with ifconfig / ip addr)
    mcast_src_ip 192.168.80.11 # change this to the IP of the specific master node
    virtual_router_id 51
    priority 101 # 101 on master01, 100 on master02 and master03
    advert_int 2
    authentication {
        auth_type PASS
        auth_pass K8SHA_KA_AUTH
    }
    virtual_ipaddress {
        192.168.80.10 # change this to the VIP address
    }
    track_script {
       chk_apiserver
    }
}
vim /etc/keepalived/check_apiserver.sh
#!/bin/bash

err=0
for k in $(seq 1 3)
do
    check_code=$(pgrep haproxy)
    if [[ $check_code == "" ]]; then
        err=$(expr $err + 1)
        sleep 1
        continue
    else
        err=0
        break
    fi
done

if [[ $err != "0" ]]; then
    echo "systemctl stop keepalived"
    /usr/bin/systemctl stop keepalived
    exit 1
else
    exit 0
fi

chmod +x /etc/keepalived/check_apiserver.sh
systemctl enable keepalived && systemctl enable haproxy
reboot

After the reboot, verify that the VIP is reachable; if it is not, fix the Keepalived/HAProxy configuration before continuing:

ping 192.168.80.10
telnet 192.168.80.10 16443

Clone four more machines: make an image of this cloud server and create four more instances from it.

Configuration after cloning

First configure the hostname and network on each of the other machines.

Configure the hosts file

Set up passwordless SSH from master01 to the other nodes:

ssh-keygen -t rsa
for i in k8s-master01 k8s-master02 k8s-master03 k8s-node01 k8s-node02;do ssh-copy-id -i .ssh/id_rsa.pub $i;done

Modify the Keepalived configuration files on master02 and master03 (see the sketch below), then restart the services on each node:

systemctl restart keepalived haproxy
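
For reference, the Keepalived fields that differ on master02 would look roughly like this (assuming the IP plan above and interface eth0; master03 is analogous with 192.168.80.13, and many setups also change state to BACKUP on the non-primary nodes):

    mcast_src_ip 192.168.80.12   # this node's own IP
    priority 100                 # master01 keeps 101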

Configure kubeadm-config.yaml on master01

vim kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: 7t2weq.bjbawausm0jaxury
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.80.11
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: k8s-master01
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  certSANs:
  - 192.168.80.10
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: 192.168.80.10:16443
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: v1.19.9
networking:
  dnsDomain: cluster.local
  podSubnet: 172.168.0.0/12
  serviceSubnet: 10.96.0.0/12
scheduler: {}

Pull the images

kubeadm config images pull --config /root/kubeadm-config.yaml
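
Optionally confirm that the control-plane images are now present locally:

docker images | grep registry.cn-hangzhou.aliyuncs.com/google_containers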

Initialization

kubeadm init --config /root/kubeadm-config.yaml --upload-certs
# master join command
kubeadm join 192.168.80.10:16443 --token 7t2weq.bjbawausm0jaxury \
    --discovery-token-ca-cert-hash sha256:566390dce77ff59473f00bb4b06073303f3478ade3e2cd181f1c09b0239e9a60 \
    --control-plane --certificate-key 9068979f113454a0d517d6320eebd7b776851fbf5cf2f575bf76a410448546a9

# node join command
kubeadm join 192.168.80.10:16443 --token 7t2weq.bjbawausm0jaxury \
    --discovery-token-ca-cert-hash sha256:566390dce77ff59473f00bb4b06073303f3478ade3e2cd181f1c09b0239e9a60 
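
The bootstrap token above is only valid for 24 hours. If it expires before all nodes have joined, a fresh worker join command can be printed on master01, and the control-plane certificates can be re-uploaded to obtain a new certificate key:

kubeadm token create --print-join-command
kubeadm init phase upload-certs --upload-certs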

Use the join commands and token above to add the other nodes to the cluster, then configure kubectl on master01:

mkdir -p ~/.kube
cp -i /etc/kubernetes/admin.conf ~/.kube/config
$ kubectl get nodes
NAME           STATUS     ROLES    AGE     VERSION
k8s-master01   NotReady   master   13m     v1.19.9
k8s-master02   NotReady   master   10m     v1.19.9
k8s-master03   NotReady   master   6m40s   v1.19.9
k8s-node01     NotReady   <none>   4m16s   v1.19.9
k8s-node02     NotReady   <none>   3m57s   v1.19.9

Download the calico.yaml file

curl https://docs.projectcalico.org/manifests/calico.yaml -O

Calico's default pod CIDR is 192.168.0.0/16. Since we are not using the default, uncomment the CALICO_IPV4POOL_CIDR variable in the file and set it to our own pod CIDR:

vim calico.yaml
- name: CALICO_IPV4POOL_CIDR
  value: "172.168.0.0/12"
kubectl apply -f calico.yaml

Use the following command to check whether all the pods are up and running:

$ kubectl get pods --all-namespaces
NAMESPACE     NAME                                       READY   STATUS    RESTARTS   AGE
kube-system   calico-kube-controllers-55ffdb7658-rp8xq   1/1     Running   6          18m
kube-system   calico-node-2l6xb                          1/1     Running   0          18m
kube-system   calico-node-65gwf                          1/1     Running   0          18m
kube-system   calico-node-7vcc2                          1/1     Running   0          18m
kube-system   calico-node-ms4ts                          1/1     Running   0          18m
kube-system   calico-node-r4crq                          1/1     Running   0          18m
kube-system   coredns-6c76c8bb89-99qxh                   1/1     Running   0          45m
kube-system   coredns-6c76c8bb89-sntts                   1/1     Running   0          45m
kube-system   etcd-k8s-master01                          1/1     Running   0          45m
kube-system   etcd-k8s-master02                          1/1     Running   0          19m
kube-system   etcd-k8s-master03                          1/1     Running   0          20m
kube-system   kube-apiserver-k8s-master01                1/1     Running   0          45m
kube-system   kube-apiserver-k8s-master02                1/1     Running   0          19m
kube-system   kube-apiserver-k8s-master03                1/1     Running   0          20m
kube-system   kube-controller-manager-k8s-master01       1/1     Running   2          45m
kube-system   kube-controller-manager-k8s-master02       1/1     Running   0          19m
kube-system   kube-controller-manager-k8s-master03       1/1     Running   0          20m
kube-system   kube-proxy-5p8xm                           1/1     Running   0          19m
kube-system   kube-proxy-8rkc5                           1/1     Running   0          45m
kube-system   kube-proxy-gnmlz                           1/1     Running   0          24m
kube-system   kube-proxy-lnr6r                           1/1     Running   0          19m
kube-system   kube-proxy-q4spk                           1/1     Running   0          20m
kube-system   kube-scheduler-k8s-master01                1/1     Running   1          45m
kube-system   kube-scheduler-k8s-master02                1/1     Running   0          19m
kube-system   kube-scheduler-k8s-master03                1/1     Running   0          20m
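
Once the Calico pods are running, the nodes should switch from NotReady to Ready:

kubectl get nodes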

Deployment complete.
