Kubernetes 安装

初始化系统

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
# CentOS 7+ (64 bit)
## Update the system
## Switch yum to the Aliyun mirror (back up the stock repo files first)
mkdir /etc/yum.repos.d-bak
mv /etc/yum.repos.d/* /etc/yum.repos.d-bak/
curl -o /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo
# Drop the Aliyun-internal mirror hosts, which are unreachable outside Alibaba Cloud
sed -i -e '/mirrors.cloud.aliyuncs.com/d' -e '/mirrors.aliyuncs.com/d' /etc/yum.repos.d/CentOS-Base.repo
yum clean all
yum makecache fast -y
yum update -y

# Set the hostname — run ONLY the line matching the node's role.
# (Running both on one machine leaves it named k8s-node.)
hostnamectl set-hostname k8s-master
hostnamectl set-hostname k8s-node

# Populate /etc/hosts on every node (adjust the IPs to your environment;
# vip.k8s.local is used below as the API-server endpoint)
cat >/etc/hosts<<EOF
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
172.31.243.139 vip.k8s.local
172.31.243.139 k8s-master
172.31.252.21 k8s-node
EOF

# Disable swap now, and comment out the fstab swap entry so it stays off
# after reboot (the grep verifies the line is commented)
swapoff -a && sysctl -w vm.swappiness=0
sed -i '/ swap / s/^/#/' /etc/fstab
grep "swap" /etc/fstab

# Put SELinux into permissive mode now and on future boots
setenforce 0
sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config
# NOTE: on CentOS /etc/sysconfig/selinux is a symlink to /etc/selinux/config,
# so this displays the file edited above
grep -vE "#|^$" /etc/sysconfig/selinux

# Disable the firewall
systemctl disable --now firewalld

# Load kernel modules needed by containerd/bridged traffic,
# and persist them across reboots
cat <<EOF > /etc/modules-load.d/k8s.conf
overlay
br_netfilter
EOF
modprobe overlay
modprobe br_netfilter

# Kernel parameters required by Kubernetes networking
cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-iptables  = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward                 = 1
EOF
sysctl --system

# Load IPVS modules for kube-proxy's ipvs mode.
# NOTE(review): nf_conntrack_ipv4 exists on the CentOS 7 3.10 kernel;
# on kernels >= 4.19 the module is named nf_conntrack — confirm your kernel.
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4
yum install ipset ipvsadm yum-utils wget vim net-tools bash-completion -y

安装containerd

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
# Install containerd
# Remove any old Docker packaging that would conflict with containerd.io
yum remove docker \
                  docker-client \
                  docker-client-latest \
                  docker-common \
                  docker-latest \
                  docker-latest-logrotate \
                  docker-logrotate \
                  docker-engine -y

# Add the upstream Docker CE repo (it provides the containerd.io package)
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
yum makecache fast -y
# List available containerd versions, newest first
yum list containerd.io --showduplicates | sort -r

# Install a pinned containerd version
yum install containerd.io-1.6.33 -y

# Regenerate the default config and switch SystemdCgroup on —
# this must match the kubelet's cgroupDriver (systemd) set later
mv /etc/containerd/config.toml /etc/containerd/config.toml_bak
containerd config default | tee /etc/containerd/config.toml
sed -i 's/SystemdCgroup = false/SystemdCgroup = true/' /etc/containerd/config.toml
# Optional: pull the pause image from the Aliyun mirror instead of registry.k8s.io
# sed -i 's@registry.k8s.io/pause@registry.cn-hangzhou.aliyuncs.com/google_containers/pause@' /etc/containerd/config.toml
systemctl daemon-reload
systemctl enable --now containerd
ctr version

安装Kubernetes

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
# Add the upstream Kubernetes package repo (v1.31 stream).
# 'exclude' keeps plain 'yum update' from upgrading these packages;
# the commands below opt in explicitly with --disableexcludes.
cat <<EOF | tee /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://pkgs.k8s.io/core:/stable:/v1.31/rpm/
enabled=1
gpgcheck=1
gpgkey=https://pkgs.k8s.io/core:/stable:/v1.31/rpm/repodata/repomd.xml.key
exclude=kubelet kubeadm kubectl cri-tools kubernetes-cni
EOF
yum makecache fast -y
# List available kubelet/kubeadm/kubectl versions, newest first
yum list kubelet kubeadm kubectl --showduplicates --disableexcludes=kubernetes | sort -r

# Install pinned versions of the Kubernetes tools
yum install kubelet-1.31.1 kubeadm-1.31.1 kubectl-1.31.1 --disableexcludes=kubernetes -y
systemctl enable --now kubelet

初始化Kubernetes

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
# Run the following on the master (control-plane) node only.
APISERVER_LB='vip.k8s.local'
# First IPv4 address on eth0 (exit after the first match so secondary
# addresses don't make HOST_IP a multi-line value); adjust the interface
# name if your node uses a different one.
HOST_IP=$(ip addr show eth0 | awk '/inet / {split($2, a, "/"); print a[1]; exit}')
echo "$HOST_IP"
K8SVERSION='1.31.1'
SERVICESUBNET='10.96.0.0/12'
PODSUBNET='10.244.0.0/16'

# Inspect kubeadm's defaults before writing our own config
kubeadm config print init-defaults
# kubeadm config print join-defaults

mkdir -p ~/kubernetes
cat <<EOF | tee ~/kubernetes/kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1beta4
kind: InitConfiguration
nodeRegistration:
  name: "$HOSTNAME"
  criSocket: "unix:///var/run/containerd/containerd.sock"
  imagePullPolicy: IfNotPresent
localAPIEndpoint:
  advertiseAddress: $HOST_IP
  bindPort: 6443
---
apiVersion: kubeadm.k8s.io/v1beta4
kind: ClusterConfiguration
kubernetesVersion: $K8SVERSION
controlPlaneEndpoint: "$APISERVER_LB:6443"
certificatesDir: /etc/kubernetes/pki
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
clusterName: kubernetes
etcd:
  local:
    dataDir: /var/lib/etcd
networking:
  dnsDomain: cluster.local
  serviceSubnet: ${SERVICESUBNET}
  podSubnet: ${PODSUBNET}
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
cgroupDriver: systemd
rotateCertificates: true
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
EOF

# Pre-pull the control-plane images, then initialize the cluster
# (keep the output — it contains the join commands)
kubeadm config images pull --config ~/kubernetes/kubeadm-config.yaml
kubeadm init --config ~/kubernetes/kubeadm-config.yaml --upload-certs --v 5 | tee ~/kubernetes/kubeadm-init.out

# Point kubectl at the admin kubeconfig and enable bash completion
export KUBECONFIG=/etc/kubernetes/admin.conf
echo 'export KUBECONFIG=/etc/kubernetes/admin.conf' >>~/.bashrc
source /usr/share/bash-completion/bash_completion
source <(kubectl completion bash)
echo 'source <(kubectl completion bash)' >>~/.bashrc

# Example join commands as printed by 'kubeadm init' — the token, CA hash,
# and certificate key are cluster-specific; copy the real values from
# ~/kubernetes/kubeadm-init.out.
# Join an additional control-plane node:
kubeadm join vip.k8s.local:6443 --token bcdny5.okgoosfee0usp00a \
        --discovery-token-ca-cert-hash sha256:43e9eabe5b7bd351a525d07f64a24b6ed4a6d0ca443b8a4e3658b2dd46eac528 \
        --control-plane --certificate-key 48ca72a7a20156dc1f56fde9fd71c578ca1095ecbad9b52a7b8d46e3d5591209

# Join a worker node:
kubeadm join vip.k8s.local:6443 --token bcdny5.okgoosfee0usp00a \
        --discovery-token-ca-cert-hash sha256:43e9eabe5b7bd351a525d07f64a24b6ed4a6d0ca443b8a4e3658b2dd46eac528

安装helm

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
# Install the helm CLI from the official release tarball
HELMVERSION='v3.16.1'
mkdir -p ~/kubernetes/helm/
cd ~/kubernetes/helm/
wget https://get.helm.sh/helm-${HELMVERSION}-linux-amd64.tar.gz
tar xf helm-${HELMVERSION}-linux-amd64.tar.gz
cp ~/kubernetes/helm/linux-amd64/helm /usr/local/bin/helm
# Smoke test: search Artifact Hub
helm search hub wordpress

# Enable bash completion for this shell and for future logins
source <(helm completion bash)
helm completion bash > /etc/bash_completion.d/helm

安装calico

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
# Install the Calico pod-network add-on (latest version, online)
# via the tigera-operator Helm chart.
# NOTE: uses PODSUBNET defined in the kubeadm-init section — run in the
# same shell session (or re-export PODSUBNET first).
mkdir -p ~/kubernetes/calico/
cd ~/kubernetes/calico/
helm repo add projectcalico https://docs.tigera.io/calico/charts
helm search repo tigera-operator
# Latest chart version: the 'version:' line, not 'app_version:'
CALICOVERSION=$(helm search repo tigera-operator -o yaml | grep 'version:' | grep -v 'app_version:' | awk '{print $2}')
echo "${CALICOVERSION}"

helm show values projectcalico/tigera-operator --version "${CALICOVERSION}"
helm pull projectcalico/tigera-operator --version "${CALICOVERSION}"
tar xf "tigera-operator-${CALICOVERSION}.tgz"

# Dry run first to inspect the rendered manifests
helm upgrade --install --debug --dry-run \
  --namespace tigera-operator \
  --create-namespace \
  --set installation.calicoNetwork.ipPools[0].cidr="${PODSUBNET}" \
  calico ./tigera-operator

# Real install — the pod IP pool must match the cluster's podSubnet
helm upgrade --install \
  --namespace tigera-operator \
  --create-namespace \
  --set installation.calicoNetwork.ipPools[0].cidr="${PODSUBNET}" \
  calico ./tigera-operator

# Watch the operator and then the Calico components come up
watch kubectl get pods -n tigera-operator
watch kubectl get pods -n calico-system

# Inspect the release
helm -n tigera-operator list
helm -n tigera-operator get values calico
helm -n tigera-operator get manifest calico
# Uninstall:
#helm -n tigera-operator uninstall calico

安装metrics-server

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
# Install metrics-server via Helm (provides 'kubectl top')
mkdir -p ~/kubernetes/metrics-server/
cd ~/kubernetes/metrics-server/

helm repo add metrics-server https://kubernetes-sigs.github.io/metrics-server/
helm repo update metrics-server
helm search repo metrics-server
# Latest chart version: the 'version:' line, not 'app_version:'
METRICSSERVERVERSION=$(helm search repo metrics-server -o yaml | grep 'version:' | grep -v 'app_version:' | awk '{print $2}')
echo "${METRICSSERVERVERSION}"
helm pull metrics-server/metrics-server --version "${METRICSSERVERVERSION}"
tar xf "metrics-server-${METRICSSERVERVERSION}.tgz"

# --kubelet-insecure-tls disables kubelet serving-cert verification;
# commonly needed on kubeadm clusters — review before production use.
# Dry run first to inspect the rendered manifests.
helm upgrade --install --dry-run --debug \
  --set args[0]="--kubelet-insecure-tls" \
  metrics-server ./metrics-server

helm upgrade --install \
  --set args[0]="--kubelet-insecure-tls" \
  metrics-server ./metrics-server

# Check status
watch kubectl get pods
helm list
helm status metrics-server
helm get values metrics-server
helm get manifest metrics-server

kubectl top nodes
kubectl top pods

安装nginx ingress

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
# Install ingress-nginx as a hostNetwork DaemonSet, scheduled only on
# nodes labeled hasIngress=true (the Service is disabled since traffic
# reaches the controller directly on the node).
mkdir -p ~/kubernetes/ingress-nginx/
cd ~/kubernetes/ingress-nginx/
helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
helm repo update ingress-nginx
helm search repo ingress-nginx
# Latest chart version: the 'version:' line, not 'app_version:'
INGRESSNGINXVERSION=$(helm search repo ingress-nginx -o yaml | grep 'version:' | grep -v 'app_version:' | awk '{print $2}')
echo "${INGRESSNGINXVERSION}"
helm pull ingress-nginx/ingress-nginx --version "${INGRESSNGINXVERSION}"
tar xf "ingress-nginx-${INGRESSNGINXVERSION}.tgz"

# Label the node(s) that should run the ingress controller
kubectl label nodes k8s-node hasIngress=true
kubectl get nodes -l hasIngress=true

# Dry run first to inspect the rendered manifests
helm upgrade --install --dry-run --debug \
  --namespace ingress-nginx \
  --create-namespace \
  --set controller.opentelemetry.enabled=true \
  --set controller.hostNetwork=true \
  --set controller.kind=DaemonSet \
  --set-string controller.nodeSelector.hasIngress=true \
  --set controller.metrics.enabled=true \
  --set-string controller.podAnnotations."prometheus\.io/scrape"="true" \
  --set-string controller.podAnnotations."prometheus\.io/port"="10254" \
  --set controller.ingressClassResource.default=true \
  --set controller.service.enabled=false \
  ingress-nginx ./ingress-nginx

helm upgrade --install \
  --namespace ingress-nginx \
  --create-namespace \
  --set controller.opentelemetry.enabled=true \
  --set controller.hostNetwork=true \
  --set controller.kind=DaemonSet \
  --set-string controller.nodeSelector.hasIngress=true \
  --set controller.metrics.enabled=true \
  --set-string controller.podAnnotations."prometheus\.io/scrape"="true" \
  --set-string controller.podAnnotations."prometheus\.io/port"="10254" \
  --set controller.ingressClassResource.default=true \
  --set controller.service.enabled=false \
  ingress-nginx ./ingress-nginx

# Inspect the release and watch the rollout
helm -n ingress-nginx list
helm -n ingress-nginx status ingress-nginx
helm -n ingress-nginx get values ingress-nginx
helm -n ingress-nginx get manifest ingress-nginx

watch kubectl -n ingress-nginx get pods

安装cert-manager

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
# Install cert-manager via Helm
mkdir -p ~/kubernetes/cert-manager
cd ~/kubernetes/cert-manager
helm repo add jetstack https://charts.jetstack.io
helm repo update jetstack
helm search repo jetstack

# Pin the version to install
CERTMANAGERVERSION='v1.15.3'
echo ${CERTMANAGERVERSION}

# Before installing the chart, you must first install the cert-manager CustomResourceDefinition resources. 
wget https://github.com/cert-manager/cert-manager/releases/download/${CERTMANAGERVERSION}/cert-manager.crds.yaml
kubectl apply -f cert-manager.crds.yaml

helm pull jetstack/cert-manager --version ${CERTMANAGERVERSION}
tar xf cert-manager-${CERTMANAGERVERSION}.tgz
helm show values ./cert-manager

# Dry run first to inspect the rendered manifests
helm upgrade  --install --dry-run --debug \
  --namespace cert-manager \
  --create-namespace \
  cert-manager ./cert-manager

helm upgrade  --install \
  --namespace cert-manager \
  --create-namespace \
  cert-manager ./cert-manager

# Check status
helm -n cert-manager list
helm -n cert-manager status cert-manager
helm -n cert-manager get values cert-manager
helm -n cert-manager get manifest cert-manager
watch kubectl -n cert-manager get pods -o wide

# Uninstall
#helm -n cert-manager uninstall cert-manager

安装dashboard

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
# Install the Kubernetes Dashboard via Helm, exposed through the nginx
# ingress at dashboard.k8s.local (bundled metrics-server/nginx/cert-manager
# sub-charts disabled — they were installed separately above).
mkdir -p ~/kubernetes/kubernetes-dashboard
cd ~/kubernetes/kubernetes-dashboard
helm repo add kubernetes-dashboard https://kubernetes.github.io/dashboard/
helm repo update kubernetes-dashboard
helm search repo kubernetes-dashboard

# Latest chart version: the 'version:' line, not 'app_version:'
DASHBOARDVERSION=$(helm search repo kubernetes-dashboard -o yaml | grep 'version:' | grep -v 'app_version:' | awk '{print $2}')
echo "${DASHBOARDVERSION}"
helm pull kubernetes-dashboard/kubernetes-dashboard --version "${DASHBOARDVERSION}"
tar xf "kubernetes-dashboard-${DASHBOARDVERSION}.tgz"
helm show values ./kubernetes-dashboard

# Dry run first to inspect the rendered manifests
helm upgrade --install --dry-run --debug \
  --namespace kubernetes-dashboard \
  --create-namespace \
  --set metrics-server.enabled=false \
  --set nginx.enabled=false \
  --set cert-manager.enabled=false \
  --set app.ingress.enabled=true \
  --set app.ingress.ingressClassName=nginx \
  --set app.ingress.hosts[0]=dashboard.k8s.local \
  kubernetes-dashboard ./kubernetes-dashboard

helm upgrade --install \
  --namespace kubernetes-dashboard \
  --create-namespace \
  --set metrics-server.enabled=false \
  --set nginx.enabled=false \
  --set cert-manager.enabled=false \
  --set app.ingress.enabled=true \
  --set app.ingress.ingressClassName=nginx \
  --set app.ingress.hosts[0]=dashboard.k8s.local \
  kubernetes-dashboard ./kubernetes-dashboard

# Watch the rollout
kubectl -n kubernetes-dashboard get events --sort-by .lastTimestamp -w
watch kubectl -n kubernetes-dashboard get pods
kubectl -n kubernetes-dashboard get ingress

# Inspect the release
helm -n kubernetes-dashboard list
helm -n kubernetes-dashboard status kubernetes-dashboard
helm -n kubernetes-dashboard get values kubernetes-dashboard
helm -n kubernetes-dashboard get manifest kubernetes-dashboard
watch kubectl -n kubernetes-dashboard get pods -o wide

# Uninstall:
#helm -n kubernetes-dashboard uninstall kubernetes-dashboard

# Create a ServiceAccount for dashboard login
kubectl apply -f - <<EOF
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kubernetes-dashboard
EOF

# Bind it to the cluster-admin ClusterRole
kubectl apply -f - <<EOF
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kubernetes-dashboard
EOF

# Short-lived login token
kubectl -n kubernetes-dashboard create token admin-user

# Long-lived token: a service-account-token Secret bound to admin-user
kubectl apply -f - <<EOF
apiVersion: v1
kind: Secret
metadata:
  name: admin-user
  namespace: kubernetes-dashboard
  annotations:
    kubernetes.io/service-account.name: "admin-user"
type: kubernetes.io/service-account-token
EOF

# Read the long-lived token
kubectl get secret admin-user -n kubernetes-dashboard -o jsonpath='{.data.token}' | base64 -d

# Cleanup
# kubectl -n kubernetes-dashboard delete serviceaccount admin-user
# kubectl -n kubernetes-dashboard delete clusterrolebinding admin-user