1. Download CentOS 7 system dependency packages

# CentOS 7+ (64-bit)
## Requires public Internet access to download packages and pull images
## Update the system
## Switch to the Aliyun mirror
mkdir /etc/yum.repos.d-bak
mv /etc/yum.repos.d/* /etc/yum.repos.d-bak/
curl -o /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo
sed -i -e '/mirrors.cloud.aliyuncs.com/d' -e '/mirrors.aliyuncs.com/d' /etc/yum.repos.d/CentOS-Base.repo
yum clean all
yum makecache fast -y

# Create directories for the RPM packages
mkdir -p /data/centos7metas
mkdir -p /data/centos7rpms
yum install --downloadonly --installroot=/data/centos7metas --releasever=7 --downloaddir=/data/centos7rpms createrepo yum-utils lvm2 wget vim net-tools device-mapper-persistent-data ipset ipvsadm bash-completion

# Install createrepo
cd /data/centos7rpms/
rpm -Uvh deltarpm-3.6-3.el7.x86_64.rpm libxml2-python-2.9.1-6.el7_9.6.x86_64.rpm python-deltarpm-3.6-3.el7.x86_64.rpm libxml2-2.9.1-6.el7_9.6.x86_64.rpm createrepo-0.9.9-28.el7.noarch.rpm

# Remove the online base repo
mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.backup

# Create a local yum repository
cat >/etc/yum.repos.d/offline-centos.repo<<EOF
[offline-centos]
name=CentOS-7 - offline
baseurl=file:///data/centos7rpms
enabled=1
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7
EOF
createrepo --database /data/centos7rpms/
yum clean all
yum makecache fast -y

# Install the packages from the local yum repository
yum repolist | grep offline-centos
yum install yum-utils lvm2 wget vim net-tools device-mapper-persistent-data bash-completion --disableexcludes=offline-centos
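
A quick sanity check (not part of the original steps) is to query the repo with everything else disabled, which confirms the offline repo can serve packages on its own:

# both commands should succeed using only the local repo
yum --disablerepo='*' --enablerepo=offline-centos list available | head -n 20
yum --disablerepo='*' --enablerepo=offline-centos info lvm2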

Initialize the system

# Set the hostname
hostnamectl set-hostname k8s-master01

# Configure the hosts file
cat >/etc/hosts<<EOF
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
172.31.250.1 vip.k8s.local
172.31.250.1 k8s-master01
172.31.250.2 k8s-node01
EOF

# Disable swap
swapoff -a && sysctl -w vm.swappiness=0
sed -i '/ swap / s/^/#/' /etc/fstab
grep "swap" /etc/fstab

# Disable SELinux
setenforce 0
sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config
grep -vE "#|^$" /etc/sysconfig/selinux

# Disable the firewall
systemctl disable --now firewalld

# Load kernel modules
cat <<EOF > /etc/modules-load.d/k8s.conf
overlay
br_netfilter
EOF
modprobe overlay
modprobe br_netfilter

# Tune kernel parameters
cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-iptables  = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward                 = 1
EOF
sysctl --system

# Install IPVS
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4
yum install ipset ipvsadm --disableexcludes=offline-centos

# Create LVM storage for /data
## 1. Identify the newly added disk
lsblk
fdisk -l

## 2. Create the physical volume
pvcreate /dev/vdb
pvs

## 3. Create the volume group
vgcreate -s 4M vg-data /dev/vdb
vgs

## 4. Create the logical volume
lvcreate -L 199G -n lv-data vg-data
lvs

## 5. Format the logical volume
mkfs.xfs /dev/vg-data/lv-data

## 6. Create the mount point
mkdir /data

## 7. Mount automatically at boot
blkid
var_uuid=`blkid | grep lv--data | awk -F'"' '{print $2}'`
echo $var_uuid
echo "UUID=$var_uuid /data                   xfs     defaults        0 0" >> /etc/fstab
cat /etc/fstab
mount -a
reboot
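
After the reboot it is worth confirming that the changes above persisted; this is a small verification sketch using standard CentOS 7 tools:

# swap should be empty, /data mounted, the modules loaded, and the sysctls applied
swapon -s
df -h /data
lsmod | grep -e overlay -e br_netfilter
sysctl net.ipv4.ip_forward net.bridge.bridge-nf-call-iptables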

Install containerd

# Install containerd
yum remove docker \
                  docker-client \
                  docker-client-latest \
                  docker-common \
                  docker-latest \
                  docker-latest-logrotate \
                  docker-logrotate \
                  docker-engine -y

yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
yum makecache fast -y
yum list containerd.io --showduplicates --disableexcludes=docker-ce-stable | sort -r

# Restore the online base repo
mv /etc/yum.repos.d/CentOS-Base.repo.backup /etc/yum.repos.d/CentOS-Base.repo
yum clean all
yum makecache fast -y

# Create directories for the RPM packages
mkdir -p /data/docker-ce-stable-meta
mkdir -p /data/docker-ce-stable
yum install --disableexcludes=docker-ce-stable --downloadonly --installroot=/data/docker-ce-stable-meta --releasever=7 --downloaddir=/data/docker-ce-stable containerd.io-1.6.33

# Remove the online base repo
mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.backup
rm -rf /etc/yum.repos.d/docker-ce.repo

# Download the GPG key
curl -o /etc/pki/rpm-gpg/docker-gpg https://download.docker.com/linux/centos/gpg

# Create a local yum repository
cat >/etc/yum.repos.d/offline-docker-ce-stable.repo<<EOF
[offline-docker-ce-stable]
name=docker-ce-stable-offline
baseurl=file:///data/docker-ce-stable
enabled=1
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/docker-gpg
EOF
createrepo --database /data/docker-ce-stable/
yum clean all
yum makecache fast -y

# Install the packages from the local yum repository
yum repolist | grep offline-docker-ce-stable
yum install containerd.io-1.6.33 --disableexcludes=offline-docker-ce-stable

# Adjust the containerd configuration
mv /etc/containerd/config.toml /etc/containerd/config.toml_bak
containerd config default | tee /etc/containerd/config.toml
sed -i 's/SystemdCgroup = false/SystemdCgroup = true/' /etc/containerd/config.toml
sed -i 's@registry.k8s.io/pause@registry.cn-hangzhou.aliyuncs.com/google_containers/pause@' /etc/containerd/config.toml
systemctl daemon-reload
systemctl enable --now containerd
ctr version
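
Before moving on, a quick check (not in the original steps) that containerd is running and picked up the two edits above:

systemctl is-active containerd
grep -E 'SystemdCgroup|sandbox_image' /etc/containerd/config.toml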

Install Kubernetes

cat <<EOF | tee /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://pkgs.k8s.io/core:/stable:/v1.31/rpm/
enabled=1
gpgcheck=1
gpgkey=https://pkgs.k8s.io/core:/stable:/v1.31/rpm/repodata/repomd.xml.key
exclude=kubelet kubeadm kubectl cri-tools kubernetes-cni
EOF
yum makecache fast -y
yum list kubelet kubeadm kubectl --showduplicates --disableexcludes=kubernetes | sort -r

# Restore the online base repo
mv /etc/yum.repos.d/CentOS-Base.repo.backup /etc/yum.repos.d/CentOS-Base.repo
yum clean all
yum makecache fast -y

# Create directories for the RPM packages
mkdir -p /data/kubernetes-meta
mkdir -p /data/kubernetes
yum install --disableexcludes=kubernetes --downloadonly --installroot=/data/kubernetes-meta --releasever=7 --downloaddir=/data/kubernetes kubelet-1.31.0 kubeadm-1.31.0 kubectl-1.31.0

# Remove the online base repo
mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.backup
rm -rf /etc/yum.repos.d/kubernetes.repo

# Download the GPG key
cd /etc/pki/rpm-gpg/
wget https://pkgs.k8s.io/core:/stable:/v1.31/rpm/repodata/repomd.xml.key
mv repomd.xml.key kubernetes-repomd.xml.key
cat kubernetes-repomd.xml.key

# Create a local yum repository
cat >/etc/yum.repos.d/offline-kubernetes.repo<<EOF
[offline-kubernetes]
name=kubernetes-offline
baseurl=file:///data/kubernetes
enabled=1
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/kubernetes-repomd.xml.key
EOF
createrepo --database /data/kubernetes/
yum clean all
yum makecache fast -y

# Install the packages from the local yum repository
yum repolist | grep offline-kubernetes
yum list kubelet kubeadm kubectl --showduplicates --disableexcludes=offline-kubernetes | sort -r
yum install kubelet-1.31.0 kubeadm-1.31.0 kubectl-1.31.0 --disableexcludes=offline-kubernetes
systemctl enable --now kubelet
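
Note that kubelet will keep restarting until kubeadm init runs in the next section; that is expected. A quick version check of what the offline repo installed:

kubeadm version -o short
kubectl version --client
systemctl status kubelet --no-pager | head -n 5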

Initialize Kubernetes

# Run the following on the master node only
APISERVER_LB='vip.k8s.local'
HOST_IP=`ip addr show eth0 | grep 'inet ' | awk '{print $2}' | awk -F '/' '{print $1}'`
echo $HOST_IP
K8SVERSION='1.31.0'

kubeadm config print init-defaults
# kubeadm config print join-defaults

mkdir ~/kubernetes
cat <<EOF | tee ~/kubernetes/kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1beta4
kind: InitConfiguration
nodeRegistration:
  name: "$HOSTNAME"
  criSocket: "unix:///var/run/containerd/containerd.sock"
  imagePullPolicy: IfNotPresent
localAPIEndpoint:
  advertiseAddress: $HOST_IP
  bindPort: 6443
---
apiVersion: kubeadm.k8s.io/v1beta4
kind: ClusterConfiguration
kubernetesVersion: $K8SVERSION
controlPlaneEndpoint: "$APISERVER_LB:6443"
certificatesDir: /etc/kubernetes/pki
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
clusterName: kubernetes
etcd:
  local:
    dataDir: /var/lib/etcd
networking:
  dnsDomain: cluster.local
  serviceSubnet: 10.96.0.0/12
  podSubnet: 10.244.0.0/16
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
cgroupDriver: systemd
rotateCertificates: true
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
EOF

kubeadm config images pull --config ~/kubernetes/kubeadm-config.yaml

# Export the images
mkdir -p /data/images/
cd /data/images/
ctr -n k8s.io images export kube-apiserver-v1.31.0.tar registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.31.0 --platform linux/amd64
ctr -n k8s.io images export kube-controller-manager-v1.31.0.tar registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.31.0 --platform linux/amd64
ctr -n k8s.io images export kube-scheduler-v1.31.0.tar registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.31.0 --platform linux/amd64
ctr -n k8s.io images export kube-proxy-v1.31.0.tar registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.31.0 --platform linux/amd64
ctr -n k8s.io images export coredns-v1.11.1.tar registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:v1.11.1 --platform linux/amd64
ctr -n k8s.io images export pause-3.10.tar registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.10 --platform linux/amd64
ctr -n k8s.io images export etcd-3.5.15-0.tar registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.5.15-0 --platform linux/amd64

# Import the images
ctr -n k8s.io images import kube-apiserver-v1.31.0.tar --platform linux/amd64
ctr -n k8s.io images import kube-controller-manager-v1.31.0.tar --platform linux/amd64
ctr -n k8s.io images import kube-scheduler-v1.31.0.tar --platform linux/amd64
ctr -n k8s.io images import kube-proxy-v1.31.0.tar --platform linux/amd64
ctr -n k8s.io images import coredns-v1.11.1.tar --platform linux/amd64
ctr -n k8s.io images import pause-3.10.tar --platform linux/amd64
ctr -n k8s.io images import etcd-3.5.15-0.tar --platform linux/amd64

kubeadm init --config ~/kubernetes/kubeadm-config.yaml --upload-certs --v 5 | tee ~/kubernetes/kubeadm-init.out

export KUBECONFIG=/etc/kubernetes/admin.conf
echo 'export KUBECONFIG=/etc/kubernetes/admin.conf' >>~/.bashrc
source /usr/share/bash-completion/bash_completion
source <(kubectl completion bash)
echo 'source <(kubectl completion bash)' >>~/.bashrc

kubeadm join vip.k8s.local:6443 --token pp1s9l.tee1li944ujwlaip \
        --discovery-token-ca-cert-hash sha256:6f9e6ad15db7a188b2038948f8a5f473f13a7db27975ab6a49e8c5621f51f3fb \
        --control-plane --certificate-key 25dd84032443252935f93b2687c733459e4694eb08e2646f2c7dda49b5b703dc

kubeadm join vip.k8s.local:6443 --token pp1s9l.tee1li944ujwlaip \
        --discovery-token-ca-cert-hash sha256:6f9e6ad15db7a188b2038948f8a5f473f13a7db27975ab6a49e8c5621f51f3fb
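
The token, discovery hash and certificate key above were copied from this cluster's kubeadm-init.out and will differ on every installation. If the token has expired, a fresh join command can be regenerated on the master with standard kubeadm subcommands:

kubeadm token create --print-join-command
# for joining another control-plane node, also regenerate the certificate key
kubeadm init phase upload-certs --upload-certs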

Clean up Kubernetes

# If needed, tear everything down and reinstall
## On the worker node
kubectl drain k8s-node01 --delete-emptydir-data --force --ignore-daemonsets
kubeadm reset
iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X
ipvsadm -C
kubectl delete node <node name>

## On the control-plane node
kubeadm reset
iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X
ipvsadm -C

Install Helm

mkdir -p /data/helm/
cd /data/helm/
wget https://get.helm.sh/helm-v3.15.4-linux-amd64.tar.gz
tar xf helm-v3.15.4-linux-amd64.tar.gz
cp /data/helm/linux-amd64/helm /usr/local/bin/helm
helm search hub wordpress

source <(helm completion bash)
helm completion bash > /etc/bash_completion.d/helm
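
A quick check that the binary is on the PATH and can reach the cluster through the KUBECONFIG exported earlier:

helm version --short
helm list -A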

Install Calico (optional)

# Install the Calico Pod network add-on
## Online installation of the latest Calico
mkdir -p /data/calico/
cd /data/calico/
helm repo add projectcalico https://docs.tigera.io/calico/charts
helm search repo tigera-operator
helm show values projectcalico/tigera-operator --version v3.28.1
helm pull projectcalico/tigera-operator --version v3.28.1
tar xf tigera-operator-v3.28.1.tgz

cat <<EOF | tee /data/calico/tigera-operator/values.yaml
# imagePullSecrets is a special helm field which, when specified, creates a secret
# containing the pull secret which is used to pull all images deployed by this helm chart and the resulting operator.
# this field is a map where the key is the desired secret name and the value is the contents of the imagePullSecret.
#
# Example: --set-file imagePullSecrets.gcr=./pull-secret.json
imagePullSecrets: {}

installation:
  enabled: true
  kubernetesProvider: ""
  calicoNetwork:
    ipPools:
    - cidr: 10.244.0.0/16
  # imagePullSecrets are configured on all images deployed by the tigera-operator.
  # secrets specified here must exist in the tigera-operator namespace; they won't be created by the operator or helm.
  # imagePullSecrets are a slice of LocalObjectReferences, which is the same format they appear as on deployments.
  #
  # Example: --set installation.imagePullSecrets[0].name=my-existing-secret
  imagePullSecrets: []

apiServer:
  enabled: true

defaultFelixConfiguration:
  enabled: false

certs:
  node:
    key:
    cert:
    commonName:
  typha:
    key:
    cert:
    commonName:
    caBundle:

# Resource requests and limits for the tigera/operator pod.
resources: {}

# Tolerations for the tigera/operator pod.
tolerations:
- effect: NoExecute
  operator: Exists
- effect: NoSchedule
  operator: Exists

# NodeSelector for the tigera/operator pod.
nodeSelector:
  kubernetes.io/os: linux

# Affinity for the tigera/operator pod.
affinity: {}

# PriorityClassName for the tigera/operator pod.
priorityClassName: ""

# Custom annotations for the tigera/operator pod.
podAnnotations: {}

# Custom labels for the tigera/operator pod.
podLabels: {}

# Image and registry configuration for the tigera/operator pod.
tigeraOperator:
  image: tigera/operator
  version: v1.34.3
  registry: quay.io
calicoctl:
  image: docker.io/calico/ctl
  tag: v3.28.1

kubeletVolumePluginPath: /var/lib/kubelet

# Optionally configure the host and port used to access the Kubernetes API server.
kubernetesServiceEndpoint:
  host: ""
  port: "6443"
EOF

helm upgrade --install --debug --dry-run \
  --namespace tigera-operator \
  --create-namespace \
  calico ./tigera-operator

# Export images with ctr
mkdir -p /data/images/calico/
cd /data/images/calico/
ctr -n k8s.io images ls | awk '{print $1}' | grep -v '^sha256:' | grep -v '@sha256:'
ctr -n k8s.io images ls | grep calico | awk '{print $1}' | grep v3.28.1
ctr -n k8s.io images export calico-apiserver-v3.28.1.tar docker.io/calico/apiserver:v3.28.1 --platform linux/amd64
ctr -n k8s.io images export calico-cni-v3.28.1.tar docker.io/calico/cni:v3.28.1 --platform linux/amd64
ctr -n k8s.io images export calico-csi-v3.28.1.tar docker.io/calico/csi:v3.28.1 --platform linux/amd64
ctr -n k8s.io images export calico-kube-controllers-v3.28.1.tar docker.io/calico/kube-controllers:v3.28.1 --platform linux/amd64
ctr -n k8s.io images export calico-node-driver-registrar-v3.28.1.tar docker.io/calico/node-driver-registrar:v3.28.1 --platform linux/amd64
ctr -n k8s.io images export calico-node-v3.28.1.tar docker.io/calico/node:v3.28.1 --platform linux/amd64
ctr -n k8s.io images export calico-pod2daemon-flexvol-v3.28.1.tar docker.io/calico/pod2daemon-flexvol:v3.28.1 --platform linux/amd64
ctr -n k8s.io images export calico-typha-v3.28.1.tar docker.io/calico/typha:v3.28.1 --platform linux/amd64

ctr -n k8s.io images ls | grep tigera | awk '{print $1}' | grep v1.34.3
ctr -n k8s.io images export tigera-operator-v1.34.3.tar quay.io/tigera/operator:v1.34.3 --platform linux/amd64

# Import images with ctr
ctr -n k8s.io images import calico-apiserver-v3.28.1.tar --platform linux/amd64
ctr -n k8s.io images import calico-cni-v3.28.1.tar --platform linux/amd64
ctr -n k8s.io images import calico-csi-v3.28.1.tar --platform linux/amd64
ctr -n k8s.io images import calico-kube-controllers-v3.28.1.tar --platform linux/amd64
ctr -n k8s.io images import calico-node-driver-registrar-v3.28.1.tar --platform linux/amd64
ctr -n k8s.io images import calico-node-v3.28.1.tar --platform linux/amd64
ctr -n k8s.io images import calico-pod2daemon-flexvol-v3.28.1.tar --platform linux/amd64
ctr -n k8s.io images import calico-typha-v3.28.1.tar --platform linux/amd64

ctr -n k8s.io images import tigera-operator-v1.34.3.tar --platform linux/amd64

helm upgrade --install \
  --namespace tigera-operator \
  --create-namespace \
  calico ./tigera-operator

watch kubectl get pods -n tigera-operator
watch kubectl get pods -n calico-system
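
Once the calico-system pods are Running, the nodes should turn Ready and CoreDNS should start; these status checks confirm that:

kubectl get nodes -o wide
kubectl -n kube-system get pods -l k8s-app=kube-dns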

helm -n tigera-operator list
helm -n tigera-operator uninstall calico

Install Flannel (optional)

# Install the Flannel Pod network add-on
## Online installation of the latest Flannel
mkdir -p /data/flannel/
cd /data/flannel/
# Needs manual creation of namespace to avoid helm error
# kubectl create ns kube-flannel
# kubectl label --overwrite ns kube-flannel pod-security.kubernetes.io/enforce=privileged

helm repo add flannel https://flannel-io.github.io/flannel/
helm search repo flannel
helm show values flannel/flannel --version v0.25.6
helm pull flannel/flannel --version v0.25.6
tar xf flannel-v0.25.6.tgz

helm upgrade --install --debug --dry-run \
 --namespace kube-flannel \
 --create-namespace \
 --set podCidr="10.244.0.0/16" \
 flannel ./flannel

# Export images with ctr
mkdir -p /data/images/
cd /data/images/
ctr -n k8s.io images ls | awk '{print $1}' | grep -v '^sha256:' | grep -v '@sha256:'
ctr -n k8s.io images ls | grep flannel | awk '{print $1}' | grep -v '^sha256:' | grep -v '@sha256:'

ctr -n k8s.io images export flannel-cni-plugin-v1.5.1-flannel2.tar docker.io/flannel/flannel-cni-plugin:v1.5.1-flannel2 --platform linux/amd64
ctr -n k8s.io images export flannel-v0.25.6.tar docker.io/flannel/flannel:v0.25.6 --platform linux/amd64

# Import images with ctr
ctr -n k8s.io images import flannel-cni-plugin-v1.5.1-flannel2.tar --platform linux/amd64
ctr -n k8s.io images import flannel-v0.25.6.tar --platform linux/amd64

cd /data/flannel/
helm upgrade --install \
 --namespace kube-flannel \
 --create-namespace \
 --set podCidr="10.244.0.0/16" \
 flannel ./flannel

watch kubectl get pods -n kube-flannel

helm -n kube-flannel list
helm -n kube-flannel uninstall flannel

Install metrics-server

# Remove the control-plane taint from the master
kubectl describe nodes k8s-master01 | grep 'Taints:'
kubectl taint nodes k8s-master01 node-role.kubernetes.io/control-plane-

mkdir -p /data/metrics-server/
cd /data/metrics-server/

helm repo add metrics-server https://kubernetes-sigs.github.io/metrics-server/
helm repo update
helm search repo metrics-server
helm pull metrics-server/metrics-server --version 3.12.1
tar xf metrics-server-3.12.1.tgz

helm upgrade --install --dry-run --debug \
--set args[0]="--kubelet-insecure-tls" \
--set image.repository='registry.cn-hangzhou.aliyuncs.com/google_containers/metrics-server' \
--set image.tag="v0.7.1" \
metrics-server ./metrics-server

# Export images with ctr
mkdir -p /data/images/
cd /data/images/
ctr -n k8s.io images ls | awk '{print $1}' | grep -v '^sha256:' | grep -v '@sha256:'
ctr -n k8s.io images ls | grep metrics-server | awk '{print $1}' | grep v0.7.1
ctr -n k8s.io images export pause-3.6.tar registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.6 --platform linux/amd64
ctr -n k8s.io images export metrics-server-v0.7.1.tar registry.cn-hangzhou.aliyuncs.com/google_containers/metrics-server:v0.7.1 --platform linux/amd64

# Import images with ctr
ctr -n k8s.io images import metrics-server-v0.7.1.tar --platform linux/amd64

helm upgrade --install \
--set args[0]="--kubelet-insecure-tls" \
--set image.repository='registry.cn-hangzhou.aliyuncs.com/google_containers/metrics-server' \
--set image.tag="v0.7.1" \
metrics-server ./metrics-server

# Check status
helm list
helm status metrics-server

watch kubectl get pods
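
After the metrics-server pod becomes Ready it may take a minute before the metrics API answers; then node and pod usage become visible:

kubectl top nodes
kubectl top pods -A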

Install ingress-nginx

mkdir -p /data/ingress-nginx/
cd /data/ingress-nginx/
helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
helm search repo ingress-nginx
helm pull ingress-nginx/ingress-nginx --version 4.11.2
tar xf ingress-nginx-4.11.2.tgz
helm show values ./ingress-nginx

# Install ingress-nginx
kubectl label nodes k8s-master01 hasIngress=true
kubectl get nodes -l hasIngress=true

cd /data/ingress-nginx/

helm upgrade --install --dry-run --debug \
  --namespace ingress-nginx \
  --create-namespace \
  --set controller.opentelemetry.enabled=true \
  --set controller.hostNetwork=true \
  --set controller.kind=DaemonSet \
  --set-string controller.nodeSelector.hasIngress=true \
  --set controller.metrics.enabled=true \
  --set-string controller.podAnnotations."prometheus\.io/scrape"="true" \
  --set-string controller.podAnnotations."prometheus\.io/port"="10254" \
  --set controller.service.enabled=false \
  ingress-nginx ./ingress-nginx

# Export images with ctr
mkdir -p /data/images/
cd /data/images/
ctr -n k8s.io images ls | awk '{print $1}' | grep -v '^sha256:' | grep -v '@sha256:'
ctr -n k8s.io images ls | grep ingress-nginx | awk '{print $1}'
ctr -n k8s.io images export ingress-nginx-controller.tar registry.k8s.io/ingress-nginx/controller@sha256:d5f8217feeac4887cb1ed21f27c2674e58be06bd8f5184cacea2a69abaf78dce --platform linux/amd64
ctr -n k8s.io images export ingress-nginx-kube-webhook-certgen.tar registry.k8s.io/ingress-nginx/kube-webhook-certgen@sha256:a320a50cc91bd15fd2d6fa6de58bd98c1bd64b9a6f926ce23a600d87043455a3 --platform linux/amd64
ctr -n k8s.io images export ingress-nginx-opentelemetry-1.25.3.tar registry.k8s.io/ingress-nginx/opentelemetry-1.25.3@sha256:f7604ac0547ed64d79b98d92133234e66c2c8aade3c1f4809fed5eec1fb7f922 --platform linux/amd64

# Import images with ctr
ctr -n k8s.io images import ingress-nginx-controller.tar --platform linux/amd64
ctr -n k8s.io images import ingress-nginx-kube-webhook-certgen.tar --platform linux/amd64
ctr -n k8s.io images import ingress-nginx-opentelemetry-1.25.3.tar --platform linux/amd64

# Tag the imported images
ctr -n k8s.io images tag registry.k8s.io/ingress-nginx/controller@sha256:d5f8217feeac4887cb1ed21f27c2674e58be06bd8f5184cacea2a69abaf78dce registry.k8s.io/ingress-nginx/controller:v1.11.2
ctr -n k8s.io images tag registry.k8s.io/ingress-nginx/kube-webhook-certgen@sha256:a320a50cc91bd15fd2d6fa6de58bd98c1bd64b9a6f926ce23a600d87043455a3 registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.4.3
ctr -n k8s.io images tag registry.k8s.io/ingress-nginx/opentelemetry-1.25.3@sha256:f7604ac0547ed64d79b98d92133234e66c2c8aade3c1f4809fed5eec1fb7f922 registry.k8s.io/ingress-nginx/opentelemetry-1.25.3:v20240813-b933310d

helm upgrade --install \
  --namespace ingress-nginx \
  --create-namespace \
  --set controller.opentelemetry.enabled=true \
  --set controller.hostNetwork=true \
  --set controller.kind=DaemonSet \
  --set-string controller.nodeSelector.hasIngress=true \
  --set controller.metrics.enabled=true \
  --set-string controller.podAnnotations."prometheus\.io/scrape"="true" \
  --set-string controller.podAnnotations."prometheus\.io/port"="10254" \
  --set controller.service.enabled=false \
  ingress-nginx ./ingress-nginx

# Set the default IngressClass
kubectl patch ingressclasses nginx -p '{"metadata": {"annotations":{"ingressclass.kubernetes.io/is-default-class":"true"}}}'

# Check status
helm -n ingress-nginx list
helm -n ingress-nginx status ingress-nginx
watch kubectl -n ingress-nginx get pods
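
Because the controller runs with hostNetwork on the labelled node, a simple smoke test (an assumption, not from the original) is to hit port 80 on the master with an unknown Host header; the controller's default backend should answer with 404:

curl -s -o /dev/null -w '%{http_code}\n' -H 'Host: smoke-test.local' http://172.31.250.1/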

Create a local StorageClass

mkdir -p /data/StorageClass/pv
cd /data/StorageClass

cat > storage-class.yml <<EOF
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: local-storage
provisioner: kubernetes.io/no-provisioner
volumeBindingMode: WaitForFirstConsumer
EOF

kubectl apply -f storage-class.yml
kubectl get sc

cat > pv.yml <<EOF
apiVersion: v1
kind: PersistentVolume
metadata:
  name: local-pv
spec:
  capacity:
    storage: 50Gi
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: local-storage
  local:
    path: /data/StorageClass/pv
  nodeAffinity:
    required:
      nodeSelectorTerms:
      - matchExpressions:
        - key: kubernetes.io/hostname
          operator: In
          values:
          - k8s-master01
EOF

kubectl apply -f pv.yml
kubectl get pv

cat > nginx-pvc.yml <<EOF
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: nginx-pvc
spec:
  accessModes:
  - ReadWriteOnce
  storageClassName: local-storage
  resources:
    requests:
      storage: 10Gi
EOF

kubectl apply -f nginx-pvc.yml
kubectl get pvc

cat > deploy-nginx.yml <<EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
  labels:
    app: nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.24.0
        ports:
        - containerPort: 80
        volumeMounts:
        - mountPath: "/usr/share/nginx/html"
          name: storage
      volumes:
        - name: storage
          persistentVolumeClaim:
            claimName: nginx-pvc
EOF

kubectl apply -f deploy-nginx.yml
kubectl delete -f deploy-nginx.yml
kubectl delete -f nginx-pvc.yml
kubectl delete -f pv.yml

kubectl get pvc
kubectl get pod -o wide
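
Because the StorageClass uses volumeBindingMode: WaitForFirstConsumer, the PVC stays Pending until a pod that consumes it is scheduled, so local-pv only binds once the nginx deployment is applied. The binding can be inspected with:

kubectl describe pvc nginx-pvc
kubectl get pv local-pv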

Install MySQL

# Install MySQL
export APP_NAME="mysql"
export STORAGE_CLASS="local-storage"
export PVC_VOLUME_MODE="Filesystem"
export PVC_RESOURCES_REQUESTS="10Gi"
export PVC_ACCESS_MODES="ReadWriteOnce"
export NAMESPACE="demo"
export REPLICAS=1
export IMAGE_REGISTRY_NAME="docker.io/library"
export IMAGE="mysql:8.0.39"
export RESOURCES_REQUESTS_CPU="500m"
export RESOURCES_LIMITS_CPU="1000m"
export RESOURCES_REQUESTS_MEMORY="512Mi"
export RESOURCES_LIMITS_MEMORY="1Gi"
export CONTAINER_PORT=3306
export PORT_NAME="mysql"
export PORT_PROTOCOL="TCP"
export SERVICE_PORT=3306
export DATA_VOLUMES_NAME="data"
export DATA_VOLUMES_MOUNTPATH="/var/lib/mysql"

export CONFIG_VOLUMES_NAME="config"
export CONFIG_VOLUMES_MOUNTPATH="/etc/mysql/conf.d/config-file.cnf"
export CONFIG_VOLUMES_SUBPATH="config-file.cnf"
export SERVICE_PORT_NAME="mysql"
export MYSQL_ROOT_PWD=`echo -n 'MyPassword' | base64`

mkdir -p /data/demo/${APP_NAME}
cd /data/demo/${APP_NAME}

kubectl create namespace demo

cat <<EOF > /data/demo/${APP_NAME}/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: ${APP_NAME}-config
  namespace: ${NAMESPACE}
data:
  ${CONFIG_VOLUMES_SUBPATH}: |-
    [mysqld]
    default_authentication_plugin=mysql_native_password
    explicit_defaults_for_timestamp
    character-set-server=UTF8
    collation-server=utf8_general_ci
    max_connections=1000
    lower_case_table_names=1
    max_connect_errors=1000
EOF

kubectl apply -f /data/demo/${APP_NAME}/configmap.yaml

cat <<EOF > /data/demo/${APP_NAME}/secret.yaml
apiVersion: v1
kind: Secret
metadata:
  name: ${APP_NAME}-secret
  namespace: ${NAMESPACE}
data:
  mysql-root-password: ${MYSQL_ROOT_PWD}
EOF

kubectl apply -f /data/demo/${APP_NAME}/secret.yaml

mkdir -p /data/StorageClass/${APP_NAME}-pv
mkdir -p /data/StorageClass/${APP_NAME}-pv-mysql

cat <<EOF > /data/demo/${APP_NAME}/pv.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: ${APP_NAME}-pv
spec:
  capacity:
    storage: 50Gi
  accessModes:
  - ${PVC_ACCESS_MODES}
  persistentVolumeReclaimPolicy: Retain
  storageClassName: ${STORAGE_CLASS}
  local:
    path: /data/StorageClass/${APP_NAME}-pv
  nodeAffinity:
    required:
      nodeSelectorTerms:
      - matchExpressions:
        - key: kubernetes.io/hostname
          operator: In
          values:
          - k8s-master01
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: ${APP_NAME}-pv-mysql
spec:
  capacity:
    storage: 50Gi
  accessModes:
  - ${PVC_ACCESS_MODES}
  persistentVolumeReclaimPolicy: Retain
  storageClassName: ${STORAGE_CLASS}
  local:
    path: /data/StorageClass/${APP_NAME}-pv-mysql
  nodeAffinity:
    required:
      nodeSelectorTerms:
      - matchExpressions:
        - key: kubernetes.io/hostname
          operator: In
          values:
          - k8s-master01
EOF

kubectl apply -f /data/demo/${APP_NAME}/pv.yaml

cat <<EOF > /data/demo/${APP_NAME}/pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: ${APP_NAME}-data
  namespace: ${NAMESPACE}
spec:
  storageClassName: ${STORAGE_CLASS}
  volumeMode: ${PVC_VOLUME_MODE}
  resources:
    requests:
      storage: ${PVC_RESOURCES_REQUESTS}
  accessModes:
    - ${PVC_ACCESS_MODES}
EOF

kubectl apply -f /data/demo/${APP_NAME}/pvc.yaml

cat <<EOF > /data/demo/${APP_NAME}/service.yaml
apiVersion: v1
kind: Service
metadata:
  name: ${APP_NAME}-svc-headless
  namespace: ${NAMESPACE}
  labels:
    app: ${APP_NAME}
spec:
  ports:
    - name: ${SERVICE_PORT_NAME}
      port: ${SERVICE_PORT}
      protocol: ${PORT_PROTOCOL}
      targetPort: ${CONTAINER_PORT}
  selector:
    app: ${APP_NAME}
  type: ClusterIP
  clusterIP: None
EOF

cat <<EOF > /data/demo/${APP_NAME}/service-nodeport.yaml
apiVersion: v1
kind: Service
metadata:
  name: ${APP_NAME}-svc
  namespace: ${NAMESPACE}
  labels:
    app: ${APP_NAME}
spec:
  ports:
    - name: ${SERVICE_PORT_NAME}
      port: ${SERVICE_PORT}
      protocol: ${PORT_PROTOCOL}
      targetPort: ${CONTAINER_PORT}
  selector:
    app: ${APP_NAME}
  type: NodePort
EOF


kubectl apply -f /data/demo/${APP_NAME}/service.yaml

cat <<EOF > /data/demo/${APP_NAME}/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  namespace: ${NAMESPACE}
  name: ${APP_NAME}-statefulset
spec:
  selector:
    matchLabels:
      app: ${APP_NAME}
  serviceName: ${APP_NAME}-svc-headless
  replicas: ${REPLICAS} # number of replicas
  volumeClaimTemplates:
    - metadata:
        name: ${APP_NAME}-data
      spec:
        accessModes: 
          - ${PVC_ACCESS_MODES}
        storageClassName: ${STORAGE_CLASS}
        resources:
          requests:
            storage: ${PVC_RESOURCES_REQUESTS}
  template:
    metadata:
      labels:
        app: ${APP_NAME}
    spec:
      tolerations:
        - effect: NoExecute
          key: node.kubernetes.io
          operator: Equal
          value: unexecutable
      volumes:
        - name: ${DATA_VOLUMES_NAME}
          persistentVolumeClaim:
            claimName: ${APP_NAME}-data # PVC name
        - name: ${CONFIG_VOLUMES_NAME}
          configMap:
            name: ${APP_NAME}-config # ConfigMap name
      containers:
        - name: ${APP_NAME}
          image: ${IMAGE_REGISTRY_NAME}/${IMAGE} # application image
          env:
            - name: MYSQL_ROOT_PASSWORD
              valueFrom:
                secretKeyRef:
                  key: mysql-root-password
                  name: ${APP_NAME}-secret
            - name: TZ
              value: Asia/Shanghai
          ports: # container ports
            - containerPort: ${CONTAINER_PORT}
              name: ${PORT_NAME}
              protocol: ${PORT_PROTOCOL}
          resources: # CPU and memory requests and limits
            requests:
              cpu: ${RESOURCES_REQUESTS_CPU}
              memory: ${RESOURCES_REQUESTS_MEMORY}
            limits:
              cpu: ${RESOURCES_LIMITS_CPU}
              memory: ${RESOURCES_LIMITS_MEMORY}
          volumeMounts: # volume mounts
            - name: ${DATA_VOLUMES_NAME}
              mountPath: ${DATA_VOLUMES_MOUNTPATH}
            - name: ${CONFIG_VOLUMES_NAME}
              mountPath: ${CONFIG_VOLUMES_MOUNTPATH}
              subPath: ${CONFIG_VOLUMES_SUBPATH}
          readinessProbe: # readiness probe
            exec:
              command:
                - /bin/bash
                - '-ec'
                - |
                  password_aux="\${MYSQL_ROOT_PASSWORD:-}"
                  if [[ -f "\${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
                      password_aux=\$(cat "\$MYSQL_ROOT_PASSWORD_FILE")
                  fi
                  mysqladmin status -uroot -p"\${password_aux}"
            failureThreshold: 5
            initialDelaySeconds: 10
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 3
          livenessProbe: # liveness probe
            exec:
              command:
                - /bin/bash
                - '-ec'
                - |
                  password_aux="\${MYSQL_ROOT_PASSWORD:-}"
                  if [[ -f "\${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
                      password_aux=\$(cat "\$MYSQL_ROOT_PASSWORD_FILE")
                  fi
                  mysqladmin status -uroot -p"\${password_aux}"
            failureThreshold: 5
            initialDelaySeconds: 10
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 3
          startupProbe: # startup probe
            exec:
              command:
                - /bin/bash
                - '-ec'
                - |
                  password_aux="\${MYSQL_ROOT_PASSWORD:-}"
                  if [[ -f "\${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
                      password_aux=\$(cat "\$MYSQL_ROOT_PASSWORD_FILE")
                  fi
                  mysqladmin status -uroot -p"\${password_aux}"
            failureThreshold: 10
            initialDelaySeconds: 30
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 3
EOF

kubectl apply -f /data/demo/${APP_NAME}/statefulset.yaml

# Test
kubectl -n demo get events --sort-by .lastTimestamp -w
kubectl -n demo get pods 
kubectl -n demo exec -it mysql-statefulset-0 -c mysql -- /bin/bash
mysql -uroot -p'MyPassword'

# Export the image with ctr
mkdir -p /data/images/demo
cd /data/images/demo
ctr -n k8s.io images export mysql-8.0.39.tar docker.io/library/mysql:8.0.39 --platform linux/amd64

# Import the image
ctr -n k8s.io images import mysql-8.0.39.tar --platform linux/amd64

ctr -n k8s.io images ls | grep mysql | awk '{print $1}'
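
To test connectivity from inside the cluster, a throwaway client pod can be run against the headless service created above (mysql-svc-headless and the root password come from the manifests in this section):

kubectl -n demo run mysql-client --rm -it --restart=Never \
  --image=docker.io/library/mysql:8.0.39 -- \
  mysql -h mysql-svc-headless -uroot -p'MyPassword' -e 'SELECT VERSION();'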

Install Redis

# Install Redis
mkdir -p /data/demo/redis
cd /data/demo/redis
helm repo add bitnami https://charts.bitnami.com/bitnami
helm repo update
helm pull bitnami/redis --version 16.13.2
tar xf redis-16.13.2.tgz

# Inspect the image information
helm upgrade --install --dry-run --debug \
  --namespace=demo \
  --create-namespace \
  --set global.storageClass=local-storage \
  --set global.redis.password='MyRedisPassword' \
  --set architecture=standalone \
  --set auth.enabled=true \
  redis ./redis

export APP_NAME='redis'
mkdir -p /data/StorageClass/${APP_NAME}-pv
mkdir -p /data/StorageClass/${APP_NAME}-pv-redis

cat <<EOF > /data/demo/${APP_NAME}/pv.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: ${APP_NAME}-pv
spec:
  capacity:
    storage: 50Gi
  accessModes:
  - ${PVC_ACCESS_MODES}
  persistentVolumeReclaimPolicy: Retain
  storageClassName: ${STORAGE_CLASS}
  local:
    path: /data/StorageClass/${APP_NAME}-pv
  nodeAffinity:
    required:
      nodeSelectorTerms:
      - matchExpressions:
        - key: kubernetes.io/hostname
          operator: In
          values:
          - k8s-master01
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: ${APP_NAME}-pv-redis
spec:
  capacity:
    storage: 50Gi
  accessModes:
  - ${PVC_ACCESS_MODES}
  persistentVolumeReclaimPolicy: Retain
  storageClassName: ${STORAGE_CLASS}
  local:
    path: /data/StorageClass/${APP_NAME}-pv-redis
  nodeAffinity:
    required:
      nodeSelectorTerms:
      - matchExpressions:
        - key: kubernetes.io/hostname
          operator: In
          values:
          - k8s-master01
EOF

kubectl apply -f /data/demo/${APP_NAME}/pv.yaml

helm upgrade --install \
  --namespace=demo \
  --create-namespace \
  --set global.storageClass=local-storage \
  --set global.redis.password='MyRedisPassword' \
  --set architecture=standalone \
  --set auth.enabled=true \
  redis ./redis

watch kubectl -n demo get pods -o wide
kubectl -n demo get pvc
kubectl -n demo exec -it redis-master-0 -c redis -- /bin/bash

# redis-master:6379
# password:MyRedisPassword

# Export the image with ctr
mkdir -p /data/images/demo
cd /data/images/demo
ctr -n k8s.io images export redis-6.2.7-debian-11-r11.tar docker.io/bitnami/redis:6.2.7-debian-11-r11 --platform linux/amd64

# Import the image
ctr -n k8s.io images import redis-6.2.7-debian-11-r11.tar --platform linux/amd64

ctr -n k8s.io images ls | grep redis | awk '{print $1}'
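
A minimal functional check against the standalone instance (the pod name and password match the Helm values used above); it should reply PONG:

kubectl -n demo exec -it redis-master-0 -- redis-cli -a 'MyRedisPassword' ping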

Install MinIO

mkdir -p /data/demo/minio
cd /data/demo/minio
wget https://dl.min.io/server/minio/release/linux-amd64/archive/minio-20240829014052.0.0-1.x86_64.rpm -O minio.rpm
yum install minio.rpm

export MINIO_ROOT_USER='minioadmin'
export MINIO_ROOT_PASSWORD='minioadminpass'
mkdir ~/minio
minio server ~/minio --console-address :9001

# Console login uses the MINIO_ROOT_USER / MINIO_ROOT_PASSWORD values exported above


# # Install minio-operator
# mkdir -p /data/demo/minio
# cd /data/demo/minio
# helm repo add minio-operator https://operator.min.io
# helm search repo minio-operator
# helm pull minio-operator/operator --version 6.0.3
# tar xf operator-6.0.3.tgz

# # Deploy Operator With Helm
# helm upgrade --install --dry-run --debug \
#   --namespace minio-operator \
#   --create-namespace \
#   operator ./operator | grep 'image: '

# # Deploy Operator With Helm
# helm upgrade --install \
#   --namespace minio-operator \
#   --create-namespace \
#   operator ./operator

# watch kubectl -n minio-operator get all

# # Export images with ctr
# mkdir -p /data/images/demo
# cd /data/images/demo
# ctr -n k8s.io images export minio-operator-v6.0.3.tar quay.io/minio/operator:v6.0.3 --platform linux/amd64

# # Import images
# ctr -n k8s.io images import minio-operator-v6.0.3.tar --platform linux/amd64

# ctr -n k8s.io images ls | grep minio | awk '{print $1}'


# # Install minio-tenant
# mkdir -p /data/demo/minio
# cd /data/demo/minio
# helm repo add minio-operator https://operator.min.io
# helm search repo minio-operator
# helm pull minio-operator/tenant --version 6.0.3
# tar xf tenant-6.0.3.tgz


# # Deploy tenant With Helm
# helm upgrade --install --dry-run --debug \
#   --namespace demo \
#   --create-namespace \
#   --set tenant.name=demo \
#   --set tenant.pools[0].server=1 \
#   --set tenant.pools[0].name=pool-0 \
#   --set tenant.pools[0].volumesPerServer=1 \
#   --set tenant.pools[0].size=10Gi \
#   --set tenant.pools[0].storageClassName=local-storage \
#   --set tenant.pools[0].securityContext.runAsUser=1000 \
#   --set tenant.pools[0].securityContext.runAsGroup=1000 \
#   --set tenant.pools[0].securityContext.fsGroup=1000 \
#   --set tenant.pools[0].securityContext.fsGroupChangePolicy="OnRootMismatch" \
#   --set tenant.pools[0].securityContext.runAsNonRoot=true \
#   --set tenant.pools[0].containerSecurityContext.runAsUser=1000 \
#   --set tenant.pools[0].containerSecurityContext.runAsGroup=1000 \
#   --set tenant.pools[0].containerSecurityContext.runAsNonRoot=true \
#   --set tenant.pools[0].containerSecurityContext.allowPrivilegeEscalation=false \
#   --set tenant.pools[0].containerSecurityContext.capabilities.drop[0]=ALL \
#   --set tenant.pools[0].containerSecurityContext.seccompProfile.type=RuntimeDefault \
#   --set ingress.api.enabled=true \
#   --set ingress.api.ingressClassName='nginx' \
#   --set ingress.api.host='minio.local' \
#   --set ingress.console.enabled=true \
#   --set ingress.console.ingressClassName='nginx' \
#   --set ingress.console.host='minio-console.local' \
#   miniotenant ./tenant | grep 'image: '

# export APP_NAME='miniotenant'
# mkdir -p /data/StorageClass/${APP_NAME}-pv-0
# mkdir -p /data/StorageClass/${APP_NAME}-pv-1
# mkdir -p /data/StorageClass/${APP_NAME}-pv-2
# mkdir -p /data/StorageClass/${APP_NAME}-pv-3

# cat <<EOF > /data/demo/minio/pv.yaml
# apiVersion: v1
# kind: PersistentVolume
# metadata:
#   name: ${APP_NAME}-pv-0
# spec:
#   capacity:
#     storage: 50Gi
#   accessModes:
#   - ${PVC_ACCESS_MODES}
#   persistentVolumeReclaimPolicy: Retain
#   storageClassName: ${STORAGE_CLASS}
#   local:
#     path: /data/StorageClass/${APP_NAME}-pv-0
#   nodeAffinity:
#     required:
#       nodeSelectorTerms:
#       - matchExpressions:
#         - key: kubernetes.io/hostname
#           operator: In
#           values:
#           - k8s-master01
# ---
# apiVersion: v1
# kind: PersistentVolume
# metadata:
#   name: ${APP_NAME}-pv-1
# spec:
#   capacity:
#     storage: 50Gi
#   accessModes:
#   - ${PVC_ACCESS_MODES}
#   persistentVolumeReclaimPolicy: Retain
#   storageClassName: ${STORAGE_CLASS}
#   local:
#     path: /data/StorageClass/${APP_NAME}-pv-1
#   nodeAffinity:
#     required:
#       nodeSelectorTerms:
#       - matchExpressions:
#         - key: kubernetes.io/hostname
#           operator: In
#           values:
#           - k8s-master01
# ---
# apiVersion: v1
# kind: PersistentVolume
# metadata:
#   name: ${APP_NAME}-pv-2
# spec:
#   capacity:
#     storage: 50Gi
#   accessModes:
#   - ${PVC_ACCESS_MODES}
#   persistentVolumeReclaimPolicy: Retain
#   storageClassName: ${STORAGE_CLASS}
#   local:
#     path: /data/StorageClass/${APP_NAME}-pv-2
#   nodeAffinity:
#     required:
#       nodeSelectorTerms:
#       - matchExpressions:
#         - key: kubernetes.io/hostname
#           operator: In
#           values:
#           - k8s-master01
# ---
# apiVersion: v1
# kind: PersistentVolume
# metadata:
#   name: ${APP_NAME}-pv-3
# spec:
#   capacity:
#     storage: 50Gi
#   accessModes:
#   - ${PVC_ACCESS_MODES}
#   persistentVolumeReclaimPolicy: Retain
#   storageClassName: ${STORAGE_CLASS}
#   local:
#     path: /data/StorageClass/${APP_NAME}-pv-3
#   nodeAffinity:
#     required:
#       nodeSelectorTerms:
#       - matchExpressions:
#         - key: kubernetes.io/hostname
#           operator: In
#           values:
#           - k8s-master01
# EOF

# kubectl apply -f /data/demo/minio/pv.yaml


# # Deploy tenant With Helm
# helm upgrade --install \
#   --namespace demo \
#   --create-namespace \
#   --set tenant.name=demo \
#   --set tenant.pools[0].server=1 \
#   --set tenant.pools[0].name=pool-0 \
#   --set tenant.pools[0].volumesPerServer=1 \
#   --set tenant.pools[0].size=10Gi \
#   --set tenant.pools[0].storageClassName=local-storage \
#   --set tenant.pools[0].securityContext.runAsUser=1000 \
#   --set tenant.pools[0].securityContext.runAsGroup=1000 \
#   --set tenant.pools[0].securityContext.fsGroup=1000 \
#   --set tenant.pools[0].securityContext.fsGroupChangePolicy="OnRootMismatch" \
#   --set tenant.pools[0].securityContext.runAsNonRoot=true \
#   --set tenant.pools[0].containerSecurityContext.runAsUser=1000 \
#   --set tenant.pools[0].containerSecurityContext.runAsGroup=1000 \
#   --set tenant.pools[0].containerSecurityContext.runAsNonRoot=true \
#   --set tenant.pools[0].containerSecurityContext.allowPrivilegeEscalation=false \
#   --set tenant.pools[0].containerSecurityContext.capabilities.drop[0]=ALL \
#   --set tenant.pools[0].containerSecurityContext.seccompProfile.type=RuntimeDefault \
#   --set ingress.api.enabled=true \
#   --set ingress.api.ingressClassName='nginx' \
#   --set ingress.api.host='minio.local' \
#   --set ingress.console.enabled=true \
#   --set ingress.console.ingressClassName='nginx' \
#   --set ingress.console.host='minio-console.local' \
#   miniotenant ./tenant

# watch kubectl -n demo get all

# curl https://dl.min.io/client/mc/release/linux-amd64/mc \
#   --create-dirs \
#   -o $HOME/minio-binaries/mc

# chmod +x $HOME/minio-binaries/mc
# export PATH=$PATH:$HOME/minio-binaries/

# mc --help

# kubectl port-forward svc/demo-hl 9000 -n demo
# mc alias set myminio https://localhost:9000 minio minio123 --insecure
# mc mb myminio/mybucket --insecure

# # Export images with ctr
# mkdir -p /data/images/demo
# cd /data/images/demo
# ctr -n k8s.io images export minio-minio-v6.0.3.tar quay.io/minio/minio:RELEASE.2024-08-17T01-24-54Z --platform linux/amd64
# ctr -n k8s.io images export minio-operator-sidecar-v6.0.2.tar quay.io/minio/operator-sidecar:v6.0.2 --platform linux/amd64

# # Import images
# ctr -n k8s.io images import minio-minio-v6.0.3.tar --platform linux/amd64
# ctr -n k8s.io images import minio-operator-sidecar-v6.0.2.tar --platform linux/amd64

# ctr -n k8s.io images ls | grep minio | awk '{print $1}' | grep -v '@sha256'

Install Neo4j

mkdir -p /data/demo/neo4j
cd /data/demo/neo4j

mkdir -p /data/StorageClass/neo4j-pv

cat <<EOF > /data/demo/neo4j/pv.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: neo4j-pv
spec:
  capacity:
    storage: 50Gi
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: local-storage
  local:
    path: /data/StorageClass/neo4j-pv
  nodeAffinity:
    required:
      nodeSelectorTerms:
      - matchExpressions:
        - key: kubernetes.io/hostname
          operator: In
          values:
          - k8s-master01
EOF

kubectl apply -f /data/demo/neo4j/pv.yaml
kubectl get pv

cat <<EOF > /data/demo/neo4j/pvc.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: neo4j-pvc
  namespace: demo
spec:
  accessModes:
  - ReadWriteOnce
  storageClassName: local-storage
  resources:
    requests:
      storage: 10Gi
EOF

kubectl apply -f /data/demo/neo4j/pvc.yaml
kubectl -n demo get pvc


cat <<EOF > /data/demo/neo4j/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  namespace: demo
  name: neo4j
spec:
  selector:
    matchLabels:
      app: neo4j
  replicas: 1
  template:
    metadata:
      labels:
        app: neo4j
    spec:
      volumes:
        - name: data
          persistentVolumeClaim:
            claimName: neo4j-pvc
      containers:
        - name: neo4j
          image: docker.io/library/neo4j:5.5.0-community
          imagePullPolicy: Always
          volumeMounts:
            - name: data
              mountPath: /data
          env:
            - name: TZ
              value: Asia/Shanghai
          ports:
            - containerPort: 7474
              name: http
              protocol: TCP
            - containerPort: 7687
              name: bolt
              protocol: TCP
          resources:
            requests:
              cpu: 200m
              memory: 512Mi
            limits:
              cpu: 2000m
              memory: 2Gi
          readinessProbe:
            tcpSocket:
              port: http
            initialDelaySeconds: 20
            periodSeconds: 10
          livenessProbe:
            tcpSocket:
              port: http
            initialDelaySeconds: 30
            periodSeconds: 20
          startupProbe:
            tcpSocket:
              port: http
            failureThreshold: 30
            periodSeconds: 10
            initialDelaySeconds: 100
EOF

kubectl apply -f /data/demo/neo4j/deployment.yaml
watch kubectl -n demo get pods

cat <<EOF > /data/demo/neo4j/service.yaml
apiVersion: v1
kind: Service
metadata:
  name: neo4j
  namespace: demo
  labels:
    app: neo4j
spec:
  ports:
    - name: http
      port: 7474
      protocol: TCP
      targetPort: 7474
  selector:
    app: neo4j
  type: ClusterIP
---
apiVersion: v1
kind: Service
metadata:
  name: neo4j-bolt
  namespace: demo
  labels:
    app: neo4j
spec:
  ports:
    - name: bolt
      port: 7687
      protocol: TCP
      targetPort: 7687
  selector:
    app: neo4j
  type: ClusterIP
EOF

kubectl apply -f /data/demo/neo4j/service.yaml

cat <<EOF > /data/demo/neo4j/ingress.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: neo4j
  namespace: demo
spec:
  ingressClassName: nginx
  rules:
    - host: neo4j.local
      http:
        paths:
          - pathType: Prefix
            path: "/"
            backend:
              service:
                name: neo4j
                port:
                  number: 7474
EOF

# http://localhost:7474

kubectl apply -f /data/demo/neo4j/ingress.yaml
kubectl -n demo get ingress
kubectl -n demo describe ingress neo4j
curl -H 'Host:neo4j.local' http://172.31.250.1:80

# neo4j/neo4j

# Export the image with ctr
mkdir -p /data/images/demo
cd /data/images/demo
ctr -n k8s.io images export neo4j-5.5.0-community.tar docker.io/library/neo4j:5.5.0-community --platform linux/amd64

# Import the image
ctr -n k8s.io images import neo4j-5.5.0-community.tar --platform linux/amd64

ctr -n k8s.io images ls | grep neo4j | awk '{print $1}'

Install Nacos

mkdir -p /data/demo/nacos/
cd /data/demo/nacos/
curl --location --show-error --silent --output nacos-mysql-schema.sql https://raw.githubusercontent.com/alibaba/nacos/2.3.2/distribution/conf/mysql-schema.sql


kubectl -n demo cp /data/demo/nacos/nacos-mysql-schema.sql mysql-statefulset-0:/tmp/nacos-mysql-schema.sql
kubectl -n demo exec -it statefulset/mysql-statefulset -- bash
cd /tmp/
ls -l

mysql -uroot -p'MyPassword'
CREATE DATABASE IF NOT EXISTS nacos_config DEFAULT CHARACTER SET utf8 COLLATE utf8_bin;
CREATE USER 'nacos'@'%' IDENTIFIED WITH mysql_native_password BY 'MyNacosPassword' PASSWORD EXPIRE NEVER;
GRANT ALL ON nacos_config.* TO 'nacos'@'%';
FLUSH PRIVILEGES;
exit;
mysql -unacos -p'MyNacosPassword'
use nacos_config;
source nacos-mysql-schema.sql;
exit;
exit

mkdir -p /data/StorageClass/nacos-pv

cat <<EOF > /data/demo/nacos/pv.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nacos-pv
spec:
  capacity:
    storage: 50Gi
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: local-storage
  local:
    path: /data/StorageClass/nacos-pv
  nodeAffinity:
    required:
      nodeSelectorTerms:
      - matchExpressions:
        - key: kubernetes.io/hostname
          operator: In
          values:
          - k8s-master01
EOF

kubectl apply -f /data/demo/nacos/pv.yaml
kubectl get pv

cat >/data/demo/nacos/nacos-deploy.yaml<<EOF
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: nacos-cm
  namespace: demo
data:
  mysql.db.host: mysql-svc-headless
  mysql.db.name: nacos_config
  mysql.port: "3306"
  mysql.user: nacos
  mysql.password: MyNacosPassword
  mysql.param: characterEncoding=utf8&connectTimeout=1000&socketTimeout=3000&autoReconnect=true&useSSL=false
---
apiVersion: v1
kind: Service
metadata:
  name: nacos-cs
  namespace: demo
spec:
  type: ClusterIP
  clusterIP: None
  ports:
    - port: 8848
      targetPort: 8848
      protocol: TCP
      name: http
    - port: 9848
      name: client-rpc
      targetPort: 9848
    - port: 9849
      name: raft-rpc
      targetPort: 9849
    - port: 7848
      name: old-raft-rpc
      targetPort: 7848
      protocol: TCP
  selector:
    app.kubernetes.io/name: nacos
    app.kubernetes.io/instance: nacos
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: nacos
  namespace: demo
spec:
  serviceName: nacos-cs
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: nacos
      app.kubernetes.io/instance: nacos
  template:
    metadata:
      labels:
        app.kubernetes.io/name: nacos
        app.kubernetes.io/instance: nacos
    spec:
      containers:
        - name: nacos
          image: "docker.io/nacos/nacos-server:v2.3.2"
          imagePullPolicy: IfNotPresent
          startupProbe:
            initialDelaySeconds: 180
            periodSeconds: 5
            timeoutSeconds: 10
            httpGet:
              scheme: HTTP
              port: 8848
              path: /nacos/v1/console/health/readiness
          livenessProbe:
            initialDelaySeconds: 10
            periodSeconds: 5
            timeoutSeconds: 10
            httpGet:
              scheme: HTTP
              port: 8848
              path: /nacos/v1/console/health/liveness
          ports:
            - name: http
              containerPort: 8848
              protocol: TCP
            - containerPort: 9848
              name: client-rpc
            - containerPort: 9849
              name: raft-rpc
            - containerPort: 7848
              name: old-raft-rpc
          resources:
            requests:
              cpu: 500m
              memory: 512Mi
          env:
            - name: TZ
              value: Asia/Shanghai
            - name: NACOS_SERVER_PORT
              value: "8848"
            - name: NACOS_APPLICATION_PORT
              value: "8848"
            - name: PREFER_HOST_MODE
              value: "hostname"
            - name: MODE
              value: "standalone"
            - name: SPRING_DATASOURCE_PLATFORM
              value: "mysql"
            - name: MYSQL_SERVICE_HOST
              valueFrom:
                configMapKeyRef:
                  name: nacos-cm
                  key: mysql.db.host
            - name: MYSQL_SERVICE_DB_NAME
              valueFrom:
                configMapKeyRef:
                  name: nacos-cm
                  key: mysql.db.name
            - name: MYSQL_SERVICE_PORT
              valueFrom:
                configMapKeyRef:
                  name: nacos-cm
                  key: mysql.port
            - name: MYSQL_SERVICE_USER
              valueFrom:
                configMapKeyRef:
                  name: nacos-cm
                  key: mysql.user
            - name: MYSQL_SERVICE_PASSWORD
              valueFrom:
                configMapKeyRef:
                  name: nacos-cm
                  key: mysql.password
            - name: MYSQL_SERVICE_DB_PARAM
              valueFrom:
                configMapKeyRef:
                  name: nacos-cm
                  key: mysql.param
          volumeMounts:
            - name: nacos-data
              mountPath: /home/nacos/plugins/peer-finder
              subPath: peer-finder
            - name: nacos-data
              mountPath: /home/nacos/data
              subPath: data
            - name: nacos-data
              mountPath: /home/nacos/logs
              subPath: logs
  volumeClaimTemplates:
    - metadata:
        name: nacos-data
      spec:
        accessModes:
        - ReadWriteOnce
        resources:
          requests:
            storage: 8Gi
        storageClassName: local-storage
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: nacos
  namespace: demo
spec:
  ingressClassName: nginx
  rules:
  - host: nacos.local
    http:
      paths:
      - backend:
          service:
            name: nacos-cs
            port:
              number: 8848
        path: /
        pathType: Prefix
EOF

kubectl apply -f /data/demo/nacos/nacos-deploy.yaml
kubectl -n demo get events --sort-by .lastTimestamp -w
watch kubectl -n demo get pods
kubectl -n demo logs -f --tail=200 statefulset/nacos
kubectl -n demo describe ingress nacos
curl -H 'Host:nacos.local' http://172.31.250.1:80

# Default Nacos console credentials: nacos/nacos
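# Sketch: to reach the console from a workstation, first point nacos.local at the ingress node
# (same IP as the curl test above), then open the console at its default /nacos/ context path.
echo '172.31.250.1 nacos.local' >> /etc/hosts
curl -s -o /dev/null -w '%{http_code}\n' http://nacos.local/nacos/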

# Export the image with ctr (for transfer to the offline node)
mkdir -p /data/images/demo
cd /data/images/demo
ctr -n k8s.io images export nacos-server-v2.3.2.tar docker.io/nacos/nacos-server:v2.3.2 --platform linux/amd64

# Import the image
ctr -n k8s.io images import nacos-server-v2.3.2.tar --platform linux/amd64

ctr -n k8s.io images ls | grep nacos | awk '{print $1}'
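# Optional (sketch): record a checksum before copying the archive to the offline host, and verify it there.
sha256sum nacos-server-v2.3.2.tar > nacos-server-v2.3.2.tar.sha256
sha256sum -c nacos-server-v2.3.2.tar.sha256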

Install apollo with helm

mkdir -p /data/demo/apollo/
cd /data/demo/apollo/
curl --location --show-error --silent --output apolloconfigdb.sql https://raw.githubusercontent.com/apolloconfig/apollo/1.9.1/scripts/sql/apolloconfigdb.sql
curl --location --show-error --silent --output apolloportaldb.sql https://raw.githubusercontent.com/apolloconfig/apollo/1.9.1/scripts/sql/apolloportaldb.sql
kubectl -n demo cp /data/demo/apollo/apolloconfigdb.sql mysql-statefulset-0:/tmp/apolloconfigdb.sql
kubectl -n demo cp /data/demo/apollo/apolloportaldb.sql mysql-statefulset-0:/tmp/apolloportaldb.sql
kubectl -n demo exec -it statefulset/mysql-statefulset -- bash
cd /tmp/
ls -l
mysql -uroot -p'MyPassword'
CREATE DATABASE IF NOT EXISTS ApolloConfigDB DEFAULT CHARACTER SET utf8 COLLATE utf8_bin;
CREATE DATABASE IF NOT EXISTS ApolloPortalDB DEFAULT CHARACTER SET utf8 COLLATE utf8_bin;
CREATE USER 'apollo'@'%' IDENTIFIED WITH mysql_native_password BY 'MyApolloPassword' PASSWORD EXPIRE NEVER;
GRANT ALL ON ApolloConfigDB.* TO 'apollo'@'%';
GRANT ALL ON ApolloPortalDB.* TO 'apollo'@'%';
FLUSH PRIVILEGES;
exit;

mysql -uapollo -p'MyApolloPassword'
use ApolloConfigDB;
source apolloconfigdb.sql;
use ApolloPortalDB;
source apolloportaldb.sql;
exit;
exit
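# As with Nacos, the two schemas can also be loaded non-interactively (sketch, same pod/user/password as above):
kubectl -n demo exec -i mysql-statefulset-0 -- mysql -uapollo -p'MyApolloPassword' ApolloConfigDB < /data/demo/apollo/apolloconfigdb.sql
kubectl -n demo exec -i mysql-statefulset-0 -- mysql -uapollo -p'MyApolloPassword' ApolloPortalDB < /data/demo/apollo/apolloportaldb.sql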


var_version='0.3.1'
helm repo add apollo https://www.apolloconfig.com/charts/
helm search repo apollo
helm pull apollo/apollo-service --version $var_version
tar xf apollo-service-$var_version.tgz

helm upgrade --install --dry-run --debug \
  --namespace demo \
  --create-namespace \
  --set adminService.replicaCount=1 \
  --set configService.replicaCount=1 \
  --set configdb.connectionStringProperties="characterEncoding=utf8&useSSL=false" \
  --set configdb.dbName=ApolloConfigDB \
  --set configdb.host=mysql-svc-headless \
  --set configdb.password="MyApolloPassword" \
  --set configdb.port=3306 \
  --set configdb.userName=apollo \
  apollo-service ./apollo-service

helm upgrade --install \
  --namespace demo \
  --create-namespace \
  --set adminService.replicaCount=1 \
  --set configService.replicaCount=1 \
  --set configdb.connectionStringProperties="characterEncoding=utf8&useSSL=false" \
  --set configdb.dbName=ApolloConfigDB \
  --set configdb.host=mysql-svc-headless \
  --set configdb.password="MyApolloPassword" \
  --set configdb.port=3306 \
  --set configdb.userName=apollo \
  apollo-service ./apollo-service

kubectl -n demo get events --sort-by .lastTimestamp -w
watch kubectl -n demo get pods
# kubectl -n demo logs -f apollo-service-apollo-adminservice-7659bc845c-xxrxw
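# Optional check (sketch): port-forward the config service and confirm its embedded Eureka registry responds;
# the service name follows from the "apollo-service" release installed above.
kubectl -n demo port-forward svc/apollo-service-apollo-configservice 8080:8080 &
curl -s http://127.0.0.1:8080/eureka/apps | head -n 20
kill %1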


helm pull apollo/apollo-portal --version $var_version
tar xf apollo-portal-$var_version.tgz
helm upgrade --install --dry-run --debug \
  --namespace demo \
  --create-namespace \
  --set config.envs="dev" \
  --set config.metaServers.dev=http://apollo-service-apollo-configservice:8080 \
  --set portaldb.connectionStringProperties="characterEncoding=utf8&useSSL=false" \
  --set portaldb.dbName=ApolloPortalDB \
  --set portaldb.host=mysql-svc-headless \
  --set portaldb.password="MyApolloPassword" \
  --set portaldb.port=3306 \
  --set portaldb.userName=apollo \
  --set replicaCount=1 \
  apollo-portal ./apollo-portal

helm upgrade --install \
  --namespace demo \
  --create-namespace \
  --set config.envs="dev" \
  --set config.metaServers.dev=http://apollo-service-apollo-configservice:8080 \
  --set portaldb.connectionStringProperties="characterEncoding=utf8&useSSL=false" \
  --set portaldb.dbName=ApolloPortalDB \
  --set portaldb.host=mysql-svc-headless \
  --set portaldb.password="MyApolloPassword" \
  --set portaldb.port=3306 \
  --set portaldb.userName=apollo \
  --set replicaCount=1 \
  apollo-portal ./apollo-portal

kubectl -n demo get events --sort-by .lastTimestamp -w
watch kubectl -n demo get pods

kubectl apply -f - <<EOF
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: apollo
  namespace: demo
spec:
  ingressClassName: nginx
  rules:
  - host: apollo.local
    http:
      paths:
      - backend:
          service:
            name: apollo-portal
            port:
              number: 8070
        path: /
        pathType: Prefix
EOF
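# Verify the ingress and test it through the controller, following the same pattern as the Nacos check above.
kubectl -n demo describe ingress apollo
curl -H 'Host: apollo.local' http://172.31.250.1:80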


# Open http://apollo.local (default Apollo portal credentials: apollo/admin)

# Export the images with ctr (for transfer to the offline node)
mkdir -p /data/images/demo
cd /data/images/demo
ctr -n k8s.io images export apollo-adminservice-1.9.1.tar docker.io/apolloconfig/apollo-adminservice:1.9.1 --platform linux/amd64
ctr -n k8s.io images export apollo-configservice-1.9.1.tar docker.io/apolloconfig/apollo-configservice:1.9.1 --platform linux/amd64
ctr -n k8s.io images export apollo-portal-1.9.1.tar docker.io/apolloconfig/apollo-portal:1.9.1 --platform linux/amd64


# Import the images
ctr -n k8s.io images import apollo-adminservice-1.9.1.tar --platform linux/amd64
ctr -n k8s.io images import apollo-configservice-1.9.1.tar --platform linux/amd64
ctr -n k8s.io images import apollo-portal-1.9.1.tar --platform linux/amd64

ctr -n k8s.io images ls | grep apollo | awk '{print $1}'

Install dashboard

mkdir -p /data/demo/dashboard/
cd /data/demo/dashboard/
helm repo add kubernetes-dashboard https://kubernetes.github.io/dashboard/
helm search repo kubernetes-dashboard
helm pull kubernetes-dashboard/kubernetes-dashboard --version 7.6.0
tar xf kubernetes-dashboard-7.6.0.tgz

helm upgrade --install --dry-run --debug \
  --namespace kubernetes-dashboard \
  --create-namespace \
  --set app.ingress.enabled=true \
  --set app.ingress.ingressClassName=nginx \
  --set cert-manager.enabled=true \
  --set app.ingress.hosts[0]=dashboard.local \
  kubernetes-dashboard ./kubernetes-dashboard

helm upgrade --install \
  --namespace kubernetes-dashboard \
  --create-namespace \
  --set app.ingress.enabled=true \
  --set app.ingress.ingressClassName=nginx \
  --set cert-manager.enabled=true \
  --set app.ingress.hosts[0]=dashboard.local \
  kubernetes-dashboard ./kubernetes-dashboard

kubectl -n kubernetes-dashboard get events --sort-by .lastTimestamp -w
watch kubectl -n kubernetes-dashboard get pods
kubectl -n kubernetes-dashboard get ingress
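# Sketch: with cert-manager enabled, the chart issues a self-signed certificate for dashboard.local,
# so test over HTTPS and skip verification; assumes the ingress controller also listens on 443 on this node IP.
curl -k -H 'Host: dashboard.local' https://172.31.250.1/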

kubectl apply -f - <<EOF
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kubernetes-dashboard
EOF

kubectl apply -f - <<EOF
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kubernetes-dashboard
EOF

kubectl apply -f - <<EOF
apiVersion: v1
kind: Secret
metadata:
  name: admin-user
  namespace: kubernetes-dashboard
  annotations:
    kubernetes.io/service-account.name: "admin-user"   
type: kubernetes.io/service-account-token  
EOF

kubectl -n kubernetes-dashboard get secret admin-user -o jsonpath='{.data.token}' | base64 -d
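# Sketch: capture the token into a variable so it can be echoed and pasted into the dashboard login form.
var_token=$(kubectl -n kubernetes-dashboard get secret admin-user -o jsonpath='{.data.token}' | base64 -d)
echo "$var_token"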

# Export the images with ctr (for transfer to the offline node)
mkdir -p /data/images/demo
cd /data/images/demo
ctr -n k8s.io images export dashboard-api-1.8.0.tar docker.io/kubernetesui/dashboard-api:1.8.0 --platform linux/amd64
ctr -n k8s.io images export dashboard-auth-1.1.3.tar docker.io/kubernetesui/dashboard-auth:1.1.3 --platform linux/amd64
ctr -n k8s.io images export dashboard-metrics-scraper-1.1.1.tar docker.io/kubernetesui/dashboard-metrics-scraper:1.1.1 --platform linux/amd64
ctr -n k8s.io images export dashboard-web-1.4.0.tar docker.io/kubernetesui/dashboard-web:1.4.0 --platform linux/amd64
ctr -n k8s.io images export kong-3.6.tar docker.io/library/kong:3.6 --platform linux/amd64
ctr -n k8s.io images export cert-manager-cainjector-v1.14.5.tar quay.io/jetstack/cert-manager-cainjector:v1.14.5 --platform linux/amd64
ctr -n k8s.io images export cert-manager-controller-v1.14.5.tar quay.io/jetstack/cert-manager-controller:v1.14.5 --platform linux/amd64
ctr -n k8s.io images export cert-manager-startupapicheck-v1.14.5.tar quay.io/jetstack/cert-manager-startupapicheck:v1.14.5 --platform linux/amd64
ctr -n k8s.io images export cert-manager-webhook-v1.14.5.tar quay.io/jetstack/cert-manager-webhook:v1.14.5 --platform linux/amd64

# Import the images
ctr -n k8s.io images import dashboard-api-1.8.0.tar --platform linux/amd64
ctr -n k8s.io images import dashboard-auth-1.1.3.tar --platform linux/amd64
ctr -n k8s.io images import dashboard-metrics-scraper-1.1.1.tar --platform linux/amd64
ctr -n k8s.io images import dashboard-web-1.4.0.tar --platform linux/amd64
ctr -n k8s.io images import kong-3.6.tar --platform linux/amd64
ctr -n k8s.io images import cert-manager-cainjector-v1.14.5.tar --platform linux/amd64
ctr -n k8s.io images import cert-manager-controller-v1.14.5.tar --platform linux/amd64
ctr -n k8s.io images import cert-manager-startupapicheck-v1.14.5.tar --platform linux/amd64
ctr -n k8s.io images import cert-manager-webhook-v1.14.5.tar --platform linux/amd64

ctr -n k8s.io images ls | grep cert-manager | awk '{print $1}' | grep -v '@sha256:'
ctr -n k8s.io images ls | grep dashboard | awk '{print $1}' | grep -v '@sha256:'
ctr -n k8s.io images ls | grep kong | awk '{print $1}' | grep -v '@sha256:'
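# Sketch: bundle all exported archives for transfer to the offline node; destination host/path are placeholders.
cd /data/images/demo
tar czf demo-images.tar.gz ./*.tar
# scp demo-images.tar.gz <user>@<offline-host>:/data/images/demo/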