Environment Overview
Host Configuration
Hostname        Internal IP     Public IP        Role
k8s-master01    192.168.0.134   120.46.211.33    master node
k8s-node01      192.168.0.41    120.46.217.5     node
k8s-node02      192.168.0.194   120.46.141.143   node
base-server01   192.168.0.9     124.70.46.58     etcd node
base-server02   192.168.0.30    124.70.56.151    etcd node
base-server03   192.168.0.78    120.46.182.129   etcd node
Software Versions
Software               Version
OS                     Kylin Linux Advanced Server V10
CPU                    aarch64 (Kunpeng 920)
etcd                   3.4.13
kubernetes             1.20.11
CoreDNS                1.8.7
calico                 v3.15
metrics-server         v0.6.1
kubernetes-dashboard   v2.6.0
IP Address Allocation
- Pod CIDR: 172.168.0.0/16
- Service CIDR: 10.96.0.0/16
Server Initialization
Configure hosts Resolution
cat >> /etc/hosts <<EOF
192.168.0.134 k8s-master01
192.168.0.41 k8s-node01
192.168.0.194 k8s-node02
192.168.0.9 base-server01 etcd-01 gfs-server01
192.168.0.30 base-server02 etcd-02 gfs-server02
192.168.0.78 base-server03 etcd-03 gfs-server03
EOF
Configure the PS1 Prompt Style
cat >> /etc/profile <<EOF
# Change the PS1 prompt style
export PS1='\[\e[37;40m\][\[\e[32;40m\]\u\[\e[37;40m\]@\[\e[33;40m\]\h \[\e[35;40m\]\W\[\e[0m\]]\\$ '
# Change the history timestamp format
export HISTTIMEFORMAT="%Y-%m-%d %H:%M:%S \$(whoami) "
export PROMPT_COMMAND='{ msg=\$(history 1 | { read x y; echo \$y; }); logger "[euid=\$(whoami)]":\$(who am i):[\$(pwd)]"\$msg";}'
EOF
Configure the Aliyun CentOS 8 Repository
curl -o /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-8.repo
# Replace $releasever with 8 in the repo file: Kylin Server V10 sets $releasever to 10, but we need the CentOS 8 mirrors; without this substitution nearly every repo URL returns 404
sed -i 's/$releasever/8/g' /etc/yum.repos.d/CentOS-Base.repo
yum clean all && yum makecache
Install Common Packages
yum install -y net-tools vim wget lrzsz tree bash-completion ntpdate
Disable the Firewall and SELinux
systemctl disable --now firewalld
setenforce 0 && sed -i 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
Disable Swap
swapoff -a && sysctl -w vm.swappiness=0 && sed -ri 's/.*swap.*/#&/' /etc/fstab
Configure Time Synchronization
# Set the server time zone
ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
echo 'Asia/Shanghai' > /etc/timezone
# Cron job to sync time every 5 minutes
echo '*/5 * * * * /usr/sbin/ntpdate time2.aliyun.com >/dev/null' >> /var/spool/cron/root
# Sync time automatically at boot
echo '/usr/sbin/ntpdate time2.aliyun.com' >> /etc/rc.local
Configure Server Resource Limits
# Temporary setting
ulimit -SHn 655350
# Permanent settings
sed -i '/^# End/i\* soft nofile 655350' /etc/security/limits.conf
sed -i '/^# End/i\* hard nofile 655350' /etc/security/limits.conf
sed -i '/^# End/i\* soft nproc 655350' /etc/security/limits.conf
sed -i '/^# End/i\* hard nproc 655350' /etc/security/limits.conf
sed -i '/^# End/i\* soft memlock unlimited' /etc/security/limits.conf
sed -i '/^# End/i\* hard memlock unlimited' /etc/security/limits.conf
Tune Kernel Parameters
cat <<EOF > /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
fs.may_detach_mounts = 1
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.netfilter.nf_conntrack_max=2310720
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl = 15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.ip_conntrack_max = 65536
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16384
EOF
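Note that the net.bridge.bridge-nf-call-* keys above only exist while the br_netfilter kernel module is loaded, so load it (and persist it across reboots) before applying the sysctls; a short sketch:
# Load br_netfilter so the net.bridge.* sysctls can be applied
modprobe br_netfilter
echo 'br_netfilter' > /etc/modules-load.d/br_netfilter.conf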
sysctl --system
Configure the IPVS Kernel Modules
# Install dependencies
yum install -y ipvsadm ipset sysstat conntrack libseccomp
# Configure the ipvs modules
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_lc
modprobe -- ip_vs_wlc
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_lblc
modprobe -- ip_vs_lblcr
modprobe -- ip_vs_dh
modprobe -- ip_vs_sh
modprobe -- ip_vs_fo
modprobe -- ip_vs_nq
modprobe -- ip_vs_sed
modprobe -- ip_vs_ftp
modprobe -- nf_conntrack
modprobe -- ip_tables
modprobe -- ip_set
modprobe -- xt_set
modprobe -- ipt_set
modprobe -- ipt_rpfilter
modprobe -- ipt_REJECT
modprobe -- ipip
EOF
# Fix the file permissions
chmod 755 /etc/sysconfig/modules/ipvs.modules
# Enable the systemd-modules-load service
systemctl enable --now systemd-modules-load.service
# Load the ipvs modules now
bash /etc/sysconfig/modules/ipvs.modules && lsmod |grep -e ip_vs -e nf_conntrack
Configure Passwordless SSH
# Generate a key pair on k8s-master01
ssh-keygen -t rsa
# Copy the public key to every host
for i in k8s-master01 k8s-node01 k8s-node02 base-server01 base-server02 base-server03;do
ssh-copy-id -i .ssh/id_rsa.pub root@$i;
done
Install Docker
For the installation procedure, see the separate guide on installing Docker on ARM64.
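One detail worth checking after installing Docker: the kubelet configuration later in this guide sets cgroupDriver: systemd, and Docker should use the same cgroup driver. A minimal sketch of /etc/docker/daemon.json (merge with any existing settings rather than overwriting them):
cat > /etc/docker/daemon.json <<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"]
}
EOF
systemctl restart docker
# Should report: Cgroup Driver: systemd
docker info | grep -i 'cgroup driver'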
Deploy the etcd Cluster
Generate the etcd Certificates
Run the following steps on the k8s-master01 node.
Download the cfssl Certificate Tool
# Clone the cfssl source
git clone -b v1.5.0 https://github.com/cloudflare/cfssl.git
# Build cfssl
cd cfssl/ && make
# Copy the resulting binaries to /usr/local/bin
cp bin/{cfssl,cfssljson} /usr/local/bin/
# Check the cfssl version
cfssl version
cfssljson -version
Create the etcd Certificates
mkdir ~/pki && cd ~/pki
# Create the CA config file
cat > ca-config.json<<EOF
{
  "signing": {
    "default": {
      "expiry": "876000h"
    },
    "profiles": {
      "kubernetes": {
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ],
        "expiry": "876000h"
      }
    }
  }
}
EOF
# Create the CSR files
cat > etcd-ca-csr.json <<EOF
{
  "CN": "etcd",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "etcd",
      "OU": "Etcd Security"
    }
  ],
  "ca": {
    "expiry": "876000h"
  }
}
EOF
cat > etcd-csr.json <<EOF
{
  "CN": "etcd",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "etcd",
      "OU": "Etcd Security"
    }
  ]
}
EOF
# Create the etcd certificate directory
mkdir -p /usr/local/etcd/ssl
# Generate the etcd certificates
cfssl gencert -initca etcd-ca-csr.json | cfssljson -bare /usr/local/etcd/ssl/etcd-ca
cfssl gencert \
-ca=/usr/local/etcd/ssl/etcd-ca.pem \
-ca-key=/usr/local/etcd/ssl/etcd-ca-key.pem \
-config=ca-config.json \
-hostname=127.0.0.1,\
192.168.0.9,\
192.168.0.30,\
192.168.0.78 \
-profile=kubernetes \
etcd-csr.json | cfssljson -bare /usr/local/etcd/ssl/etcd
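Before distributing the certificate it can be worth confirming that the SANs cover every etcd member address; a quick check with openssl:
# 127.0.0.1 and all three member IPs should be listed
openssl x509 -in /usr/local/etcd/ssl/etcd.pem -noout -text | grep -A1 'Subject Alternative Name'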
Deploy the etcd Cluster
The officially built arm64 binary complains at runtime that the architecture is unsupported:
running etcd on unsupported architecture “arm64” since ETCD_UNSUPPORTED_ARCH is set
so we patch the source and rebuild the etcd binary ourselves.
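If you would rather not rebuild, upstream etcd also accepts the ETCD_UNSUPPORTED_ARCH environment variable as an explicit, at-your-own-risk opt-in on unsupported architectures; a sketch of wiring it into the systemd unit created later:
# Alternative to recompiling: add to the [Service] section of etcd.service
Environment="ETCD_UNSUPPORTED_ARCH=arm64"
Rebuilding, as described below, avoids relying on that escape hatch.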
Download the etcd Source
git clone -b v3.4.13 https://github.com/etcd-io/etcd.git
Modify the source:
vim etcdmain/etcd.go
# The original code is shown below; the line to change is: if runtime.GOARCH == "amd64" || runtime.GOARCH == "ppc64le"
func checkSupportArch() {
// TODO qualify arm64
if runtime.GOARCH == "amd64" || runtime.GOARCH == "ppc64le" {
return
}
... N lines omitted ...
}
# The modified code
if runtime.GOARCH == "amd64" || runtime.GOARCH == "ppc64le" || runtime.GOARCH == "arm64" {
return
}
Install a Newer golang
# Download a newer golang toolchain
wget https://golang.google.cn/dl/go1.18.6.linux-arm64.tar.gz
# Remove the golang that ships with the OS
rpm -e golang.aarch64 --nodeps
# Install the golang tarball
tar xf go1.18.6.linux-arm64.tar.gz -C /usr/local/
echo "export PATH=\$PATH:/usr/local/go/bin" >> /etc/profile
source /etc/profile
# Check the golang version
go version
Build etcd
# Enter the etcd source directory
cd etcd
# Set the Go module proxy
go env -w GOPROXY=https://goproxy.cn,direct
# Vendor the module dependencies
go mod vendor
# Build etcd
make build
# Check the freshly built binary
./bin/etcd -version
Install the Binaries
mkdir -p /usr/local/etcd/{bin,cfg,ssl}
cp etcd/bin/{etcd,etcdctl} /usr/local/etcd/bin/
Create the etcd Configuration File
cat > /usr/local/etcd/cfg/etcd.config.yml << EOF
name: 'base-server01'
data-dir: /var/lib/etcd
wal-dir: /var/lib/etcd/wal
snapshot-count: 5000
heartbeat-interval: 100
election-timeout: 1000
quota-backend-bytes: 0
listen-peer-urls: 'https://192.168.0.9:2380'
listen-client-urls: 'https://192.168.0.9:2379,http://127.0.0.1:2379'
max-snapshots: 3
max-wals: 5
cors:
initial-advertise-peer-urls: 'https://192.168.0.9:2380'
advertise-client-urls: 'https://192.168.0.9:2379'
discovery:
discovery-fallback: 'proxy'
discovery-proxy:
discovery-srv:
initial-cluster: 'base-server01=https://192.168.0.9:2380,base-server02=https://192.168.0.30:2380,base-server03=https://192.168.0.78:2380'
initial-cluster-token: 'etcd-k8s-cluster'
initial-cluster-state: 'new'
strict-reconfig-check: false
enable-v2: true
enable-pprof: true
proxy: 'off'
proxy-failure-wait: 5000
proxy-refresh-interval: 30000
proxy-dial-timeout: 1000
proxy-write-timeout: 5000
proxy-read-timeout: 0
client-transport-security:
  cert-file: '/usr/local/etcd/ssl/etcd.pem'
  key-file: '/usr/local/etcd/ssl/etcd-key.pem'
  client-cert-auth: true
  trusted-ca-file: '/usr/local/etcd/ssl/etcd-ca.pem'
  auto-tls: true
peer-transport-security:
  cert-file: '/usr/local/etcd/ssl/etcd.pem'
  key-file: '/usr/local/etcd/ssl/etcd-key.pem'
  peer-client-cert-auth: true
  trusted-ca-file: '/usr/local/etcd/ssl/etcd-ca.pem'
  auto-tls: true
debug: false
log-package-levels:
log-outputs: [default]
force-new-cluster: false
EOF
On each of the other nodes, adjust the following fields (a scripted sketch follows the list):
- name: the node name, unique within the cluster
- listen-peer-urls: change to the current node's IP address
- listen-client-urls: change to the current node's IP address
- initial-advertise-peer-urls: change to the current node's IP address
- advertise-client-urls: change to the current node's IP address
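A sketch of scripting those edits on each node, where NODE_IP and NODE_NAME are placeholders you set per node (only the listen/advertise fields are touched, so the initial-cluster line stays intact):
NODE_IP=192.168.0.30      # example: base-server02
NODE_NAME=base-server02
sed -i \
  -e "s|^name: .*|name: '${NODE_NAME}'|" \
  -e "s|^listen-peer-urls: .*|listen-peer-urls: 'https://${NODE_IP}:2380'|" \
  -e "s|^listen-client-urls: .*|listen-client-urls: 'https://${NODE_IP}:2379,http://127.0.0.1:2379'|" \
  -e "s|^initial-advertise-peer-urls: .*|initial-advertise-peer-urls: 'https://${NODE_IP}:2380'|" \
  -e "s|^advertise-client-urls: .*|advertise-client-urls: 'https://${NODE_IP}:2379'|" \
  /usr/local/etcd/cfg/etcd.config.yml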
Create the etcd systemd Unit (identical on every node)
cat > /usr/lib/systemd/system/etcd.service << EOF
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
[Service]
Type=notify
ExecStart=/usr/local/etcd/bin/etcd --config-file=/usr/local/etcd/cfg/etcd.config.yml
Restart=on-failure
RestartSec=10
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
Alias=etcd3.service
EOF
Copy the etcd Certificates to All etcd Nodes
Run this step on k8s-master01, since that is where the certificates were generated.
for i in base-server01 base-server02 base-server03;do
scp -r /usr/local/etcd/ssl/* root@$i:/usr/local/etcd/ssl/;
done
Start etcd and Enable It at Boot
# Kylin's default umask is 0077; relax the file permissions or etcd may fail with permission errors
chmod 755 -R /usr/local/etcd/
# Start etcd
systemctl enable --now etcd
Check the etcd Status
export ETCDCTL_API=3
/usr/local/etcd/bin/etcdctl \
--cacert=/usr/local/etcd/ssl/etcd-ca.pem \
--cert=/usr/local/etcd/ssl/etcd.pem \
--key=/usr/local/etcd/ssl/etcd-key.pem \
--endpoints="https://192.168.0.9:2379,\
https://192.168.0.30:2379,\
https://192.168.0.78:2379" endpoint status --write-out=table
Deploy Kubernetes
Deploy the Master Node
Deploy the Master Components
Download the Kubernetes Server Binaries
wget https://dl.k8s.io/v1.20.11/kubernetes-server-linux-arm64.tar.gz
Unpack the Archive
tar zxvf kubernetes-server-linux-arm64.tar.gz
Install the Binaries
mkdir -p /usr/local/kubernetes/{bin,cfg,ssl,manifests}
cp kubernetes/server/bin/kube{-apiserver,-controller-manager,-scheduler,-proxy,let} /usr/local/kubernetes/bin/
cp kubernetes/server/bin/kubectl /usr/local/bin/
Create the kube-apiserver systemd Unit
cat > /usr/lib/systemd/system/kube-apiserver.service <<EOF
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
[Service]
ExecStart=/usr/local/kubernetes/bin/kube-apiserver \\
--v=2 \\
--logtostderr=true \\
--allow-privileged=true \\
--bind-address=0.0.0.0 \\
--secure-port=6443 \\
--advertise-address=192.168.0.134 \\
--service-cluster-ip-range=10.96.0.0/16 \\
--service-node-port-range=30000-32767 \\
--etcd-servers=https://192.168.0.9:2379,https://192.168.0.30:2379,https://192.168.0.78:2379 \\
--etcd-cafile=/usr/local/etcd/ssl/etcd-ca.pem \\
--etcd-certfile=/usr/local/etcd/ssl/etcd.pem \\
--etcd-keyfile=/usr/local/etcd/ssl/etcd-key.pem \\
--client-ca-file=/usr/local/kubernetes/ssl/ca.pem \\
--tls-cert-file=/usr/local/kubernetes/ssl/apiserver.pem \\
--tls-private-key-file=/usr/local/kubernetes/ssl/apiserver-key.pem \\
--kubelet-client-certificate=/usr/local/kubernetes/ssl/apiserver.pem \\
--kubelet-client-key=/usr/local/kubernetes/ssl/apiserver-key.pem \\
--service-account-key-file=/usr/local/kubernetes/ssl/sa.pub \\
--service-account-signing-key-file=/usr/local/kubernetes/ssl/sa.key \\
--service-account-issuer=https://kubernetes.default.svc.cluster.local \\
--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname \\
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota \\
--authorization-mode=Node,RBAC \\
--enable-bootstrap-token-auth=true \\
--requestheader-client-ca-file=/usr/local/kubernetes/ssl/front-proxy-ca.pem \\
--proxy-client-cert-file=/usr/local/kubernetes/ssl/front-proxy-client.pem \\
--proxy-client-key-file=/usr/local/kubernetes/ssl/front-proxy-client-key.pem \\
--requestheader-allowed-names=aggregator \\
--requestheader-group-headers=X-Remote-Group \\
--requestheader-extra-headers-prefix=X-Remote-Extra- \\
--requestheader-username-headers=X-Remote-User \\
--feature-gates=EphemeralContainers=true \\
--enable-aggregator-routing=true
Restart=on-failure
RestartSec=10s
LimitNOFILE=65535
[Install]
WantedBy=multi-user.target
EOF
Create the kube-controller-manager systemd Unit
cat >/usr/lib/systemd/system/kube-controller-manager.service <<EOF
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
[Service]
ExecStart=/usr/local/kubernetes/bin/kube-controller-manager \\
--v=2 \\
--logtostderr=true \\
--address=0.0.0.0 \\
--root-ca-file=/usr/local/kubernetes/ssl/ca.pem \\
--cluster-signing-cert-file=/usr/local/kubernetes/ssl/ca.pem \\
--cluster-signing-key-file=/usr/local/kubernetes/ssl/ca-key.pem \\
--service-account-private-key-file=/usr/local/kubernetes/ssl/sa.key \\
--kubeconfig=/usr/local/kubernetes/cfg/controller-manager.kubeconfig \\
--leader-elect=true \\
--use-service-account-credentials=true \\
--node-monitor-grace-period=40s \\
--node-monitor-period=5s \\
--pod-eviction-timeout=2m0s \\
--controllers=*,bootstrapsigner,tokencleaner \\
--allocate-node-cidrs=true \\
--cluster-cidr=172.168.0.0/16 \\
--requestheader-client-ca-file=/usr/local/kubernetes/ssl/front-proxy-ca.pem \\
--node-cidr-mask-size=24
Restart=always
RestartSec=10s
[Install]
WantedBy=multi-user.target
EOF
Create the kube-scheduler systemd Unit
cat >/usr/lib/systemd/system/kube-scheduler.service <<EOF
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
[Service]
ExecStart=/usr/local/kubernetes/bin/kube-scheduler \\
--v=2 \\
--logtostderr=true \\
--address=0.0.0.0 \\
--leader-elect=true \\
--kubeconfig=/usr/local/kubernetes/cfg/scheduler.kubeconfig
Restart=always
RestartSec=10s
[Install]
WantedBy=multi-user.target
EOF
Generate the Master Certificates
Create the CA Certificate
# Create the certificate directory
mkdir -p /usr/local/kubernetes/ssl
# Create the CA config file
cat > ca-config.json<<EOF
{
  "signing": {
    "default": {
      "expiry": "876000h"
    },
    "profiles": {
      "kubernetes": {
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ],
        "expiry": "876000h"
      }
    }
  }
}
EOF
# Create the ca-csr.json file
cat > ca-csr.json <<EOF
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "Kubernetes",
      "OU": "Kubernetes-manual"
    }
  ],
  "ca": {
    "expiry": "876000h"
  }
}
EOF
# Generate the CA certificate and key
cfssl gencert -initca ca-csr.json | cfssljson -bare /usr/local/kubernetes/ssl/ca
Create the Apiserver Certificates
# Create the apiserver-csr.json file
cat > apiserver-csr.json <<EOF
{
  "CN": "kube-apiserver",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "Kubernetes",
      "OU": "Kubernetes-manual"
    }
  ]
}
EOF
# Generate the apiserver certificate
cfssl gencert \
-ca=/usr/local/kubernetes/ssl/ca.pem \
-ca-key=/usr/local/kubernetes/ssl/ca-key.pem \
-config=ca-config.json \
-hostname=10.96.0.1,\
120.46.211.33,\
192.168.0.134,\
127.0.0.1,\
kubernetes,\
kubernetes.default,\
kubernetes.default.svc,\
kubernetes.default.svc.cluster,\
kubernetes.default.svc.cluster.local \
-profile=kubernetes \
apiserver-csr.json | cfssljson -bare /usr/local/kubernetes/ssl/apiserver
Generate the Apiserver Aggregation Certificates
# Create the front-proxy-ca-csr.json file
cat > front-proxy-ca-csr.json <<EOF
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  }
}
EOF
# Create the front-proxy-client-csr.json file
cat > front-proxy-client-csr.json <<EOF
{
  "CN": "front-proxy-client",
  "key": {
    "algo": "rsa",
    "size": 2048
  }
}
EOF
# Generate the aggregation CA
cfssl gencert -initca front-proxy-ca-csr.json | cfssljson -bare /usr/local/kubernetes/ssl/front-proxy-ca
# Generate the aggregation client certificate
cfssl gencert \
-ca=/usr/local/kubernetes/ssl/front-proxy-ca.pem \
-ca-key=/usr/local/kubernetes/ssl/front-proxy-ca-key.pem \
-config=ca-config.json \
-profile=kubernetes \
front-proxy-client-csr.json | cfssljson -bare /usr/local/kubernetes/ssl/front-proxy-client
Generate the controller-manager Certificates
# Create the manager-csr.json file
cat > manager-csr.json <<EOF
{
  "CN": "system:kube-controller-manager",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "system:kube-controller-manager",
      "OU": "Kubernetes-manual"
    }
  ]
}
EOF
# Generate the controller-manager certificate
cfssl gencert \
-ca=/usr/local/kubernetes/ssl/ca.pem \
-ca-key=/usr/local/kubernetes/ssl/ca-key.pem \
-config=ca-config.json \
-profile=kubernetes \
manager-csr.json | cfssljson -bare /usr/local/kubernetes/ssl/controller-manager
# Create the controller-manager kubeconfig
kubectl config set-cluster kubernetes \
--certificate-authority=/usr/local/kubernetes/ssl/ca.pem \
--embed-certs=true \
--server=https://192.168.0.134:6443 \
--kubeconfig=/usr/local/kubernetes/cfg/controller-manager.kubeconfig && \
kubectl config set-credentials system:kube-controller-manager \
--client-certificate=/usr/local/kubernetes/ssl/controller-manager.pem \
--client-key=/usr/local/kubernetes/ssl/controller-manager-key.pem \
--embed-certs=true \
--kubeconfig=/usr/local/kubernetes/cfg/controller-manager.kubeconfig && \
kubectl config set-context system:kube-controller-manager@kubernetes \
--cluster=kubernetes \
--user=system:kube-controller-manager \
--kubeconfig=/usr/local/kubernetes/cfg/controller-manager.kubeconfig && \
kubectl config use-context system:kube-controller-manager@kubernetes \
--kubeconfig=/usr/local/kubernetes/cfg/controller-manager.kubeconfig
Generate the scheduler Certificates
# Create the scheduler-csr.json file
cat > scheduler-csr.json <<EOF
{
  "CN": "system:kube-scheduler",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "system:kube-scheduler",
      "OU": "Kubernetes-manual"
    }
  ]
}
EOF
# Generate the scheduler certificate
cfssl gencert \
-ca=/usr/local/kubernetes/ssl/ca.pem \
-ca-key=/usr/local/kubernetes/ssl/ca-key.pem \
-config=ca-config.json \
-profile=kubernetes \
scheduler-csr.json | cfssljson -bare /usr/local/kubernetes/ssl/scheduler
# Create the scheduler kubeconfig
kubectl config set-cluster kubernetes \
--certificate-authority=/usr/local/kubernetes/ssl/ca.pem \
--embed-certs=true \
--server=https://192.168.0.134:6443 \
--kubeconfig=/usr/local/kubernetes/cfg/scheduler.kubeconfig && \
kubectl config set-credentials system:kube-scheduler \
--client-certificate=/usr/local/kubernetes/ssl/scheduler.pem \
--client-key=/usr/local/kubernetes/ssl/scheduler-key.pem \
--embed-certs=true \
--kubeconfig=/usr/local/kubernetes/cfg/scheduler.kubeconfig && \
kubectl config set-context system:kube-scheduler@kubernetes \
--cluster=kubernetes \
--user=system:kube-scheduler \
--kubeconfig=/usr/local/kubernetes/cfg/scheduler.kubeconfig && \
kubectl config use-context system:kube-scheduler@kubernetes \
--kubeconfig=/usr/local/kubernetes/cfg/scheduler.kubeconfig
Generate the Cluster Administrator (admin) Certificate
# Create the admin-csr.json file
cat > admin-csr.json <<EOF
{
  "CN": "admin",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "system:masters",
      "OU": "Kubernetes-manual"
    }
  ]
}
EOF
# Generate the admin certificate
cfssl gencert \
-ca=/usr/local/kubernetes/ssl/ca.pem \
-ca-key=/usr/local/kubernetes/ssl/ca-key.pem \
-config=ca-config.json \
-profile=kubernetes \
admin-csr.json | cfssljson -bare /usr/local/kubernetes/ssl/admin
# Create the admin kubeconfig
kubectl config set-cluster kubernetes \
--certificate-authority=/usr/local/kubernetes/ssl/ca.pem \
--embed-certs=true \
--server=https://192.168.0.134:6443 \
--kubeconfig=/usr/local/kubernetes/cfg/admin.kubeconfig && \
kubectl config set-credentials kubernetes-admin \
--client-certificate=/usr/local/kubernetes/ssl/admin.pem \
--client-key=/usr/local/kubernetes/ssl/admin-key.pem \
--embed-certs=true \
--kubeconfig=/usr/local/kubernetes/cfg/admin.kubeconfig && \
kubectl config set-context kubernetes-admin@kubernetes \
--cluster=kubernetes \
--user=kubernetes-admin \
--kubeconfig=/usr/local/kubernetes/cfg/admin.kubeconfig && \
kubectl config use-context kubernetes-admin@kubernetes \
--kubeconfig=/usr/local/kubernetes/cfg/admin.kubeconfig
Generate the ServiceAccount Key Pair
openssl genrsa -out /usr/local/kubernetes/ssl/sa.key 2048
openssl rsa -in /usr/local/kubernetes/ssl/sa.key -pubout -out /usr/local/kubernetes/ssl/sa.pub
Start the Master Components
systemctl enable --now kube-apiserver.service kube-controller-manager.service kube-scheduler.service
Configure kubectl
mkdir ~/.kube
cp /usr/local/kubernetes/cfg/admin.kubeconfig ~/.kube/config
yum install -y bash-completion
source /usr/share/bash-completion/bash_completion
source <(kubectl completion bash)
kubectl completion bash > ~/.kube/completion.bash.inc
echo "source ~/.kube/completion.bash.inc" >> ~/.bash_profile
source $HOME/.bash_profile
Test kubectl
kubectl get cs
kubectl cluster-info
kubectl get svc
Deploy the Worker Nodes
Generate the Worker Certificates
Create the bootstrap.secret.yaml file on the master node
cat > bootstrap.secret.yaml <<EOF
apiVersion: v1
kind: Secret
metadata:
  name: bootstrap-token-c8ad9c
  namespace: kube-system
type: bootstrap.kubernetes.io/token
stringData:
  description: "The default bootstrap token generated by 'kubelet '."
  token-id: c8ad9c
  token-secret: 2e4d610cf3e7426e
  usage-bootstrap-authentication: "true"
  usage-bootstrap-signing: "true"
  auth-extra-groups: system:bootstrappers:default-node-token,system:bootstrappers:worker,system:bootstrappers:ingress
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubelet-bootstrap
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:node-bootstrapper
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:bootstrappers:default-node-token
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: node-autoapprove-bootstrap
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:certificates.k8s.io:certificatesigningrequests:nodeclient
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:bootstrappers:default-node-token
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: node-autoapprove-certificate-rotation
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:certificates.k8s.io:certificatesigningrequests:selfnodeclient
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:nodes
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:kube-apiserver-to-kubelet
rules:
- apiGroups:
  - ""
  resources:
  - nodes/proxy
  - nodes/stats
  - nodes/log
  - nodes/spec
  - nodes/metrics
  verbs:
  - "*"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: system:kube-apiserver
  namespace: ""
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:kube-apiserver-to-kubelet
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: User
  name: kube-apiserver
EOF
Create the Secret and RBAC Configuration
kubectl create -f bootstrap.secret.yaml
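The token-id and token-secret above (c8ad9c / 2e4d610cf3e7426e) must match the --token passed when building bootstrap-kubelet.kubeconfig below. If you prefer your own values, a sketch for generating a pair in the required 6-char.16-char [a-z0-9] format:
TOKEN_ID=$(head -c 16 /dev/urandom | md5sum | head -c 6)
TOKEN_SECRET=$(head -c 16 /dev/urandom | md5sum | head -c 16)
# Substitute this into both the Secret and the kubeconfig below
echo "${TOKEN_ID}.${TOKEN_SECRET}"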
Create the bootstrap-kubelet.kubeconfig File
kubectl config set-cluster kubernetes \
--certificate-authority=/usr/local/kubernetes/ssl/ca.pem \
--embed-certs=true \
--server=https://192.168.0.134:6443 \
--kubeconfig=/usr/local/kubernetes/cfg/bootstrap-kubelet.kubeconfig && \
kubectl config set-credentials tls-bootstrap-token-user \
--token=c8ad9c.2e4d610cf3e7426e \
--kubeconfig=/usr/local/kubernetes/cfg/bootstrap-kubelet.kubeconfig && \
kubectl config set-context tls-bootstrap-token-user@kubernetes \
--cluster=kubernetes \
--user=tls-bootstrap-token-user \
--kubeconfig=/usr/local/kubernetes/cfg/bootstrap-kubelet.kubeconfig && \
kubectl config use-context tls-bootstrap-token-user@kubernetes \
--kubeconfig=/usr/local/kubernetes/cfg/bootstrap-kubelet.kubeconfig
Create the kube-proxy ServiceAccount
kubectl -n kube-system create serviceaccount kube-proxy
Create the kube-proxy kubeconfig File
kubectl create clusterrolebinding system:kube-proxy \
--clusterrole system:node-proxier \
--serviceaccount kube-system:kube-proxy
SECRET=$(kubectl -n kube-system get sa/kube-proxy \
--output=jsonpath='{.secrets[0].name}')
JWT_TOKEN=$(kubectl -n kube-system get secret/$SECRET \
--output=jsonpath='{.data.token}' | base64 -d)
kubectl config set-cluster kubernetes \
--certificate-authority=/usr/local/kubernetes/ssl/ca.pem \
--embed-certs=true \
--server=https://192.168.0.134:6443 \
--kubeconfig=/usr/local/kubernetes/cfg/kube-proxy.kubeconfig && \
kubectl config set-credentials kubernetes \
--token=${JWT_TOKEN} \
--kubeconfig=/usr/local/kubernetes/cfg/kube-proxy.kubeconfig && \
kubectl config set-context kubernetes \
--cluster=kubernetes \
--user=kubernetes \
--kubeconfig=/usr/local/kubernetes/cfg/kube-proxy.kubeconfig && \
kubectl config use-context kubernetes \
--kubeconfig=/usr/local/kubernetes/cfg/kube-proxy.kubeconfig
Deploy the kubelet Service
Install the binary by copying kubelet from the master node
# Create the kubernetes directories on every worker node
mkdir -p /usr/local/kubernetes/{bin,cfg,ssl,manifests} /opt/cni/bin
# Copy the kubelet binary to the worker
scp -r /usr/local/kubernetes/bin/kubelet root@k8s-node01:/usr/local/kubernetes/bin/
Copy the Certificates from the Master Node
scp /usr/local/kubernetes/ssl/{ca,ca-key,front-proxy-ca}.pem root@k8s-node01:/usr/local/kubernetes/ssl/
scp /usr/local/kubernetes/cfg/bootstrap-kubelet.kubeconfig root@k8s-node01:/usr/local/kubernetes/cfg/
Create the kubelet systemd Unit
cat >/usr/lib/systemd/system/kubelet.service <<EOF
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/kubernetes/kubernetes
After=docker.service
Requires=docker.service
[Service]
ExecStart=/usr/local/kubernetes/bin/kubelet
Restart=always
StartLimitInterval=0
RestartSec=10
[Install]
WantedBy=multi-user.target
EOF
mkdir -p /etc/systemd/system/kubelet.service.d
cat >/etc/systemd/system/kubelet.service.d/10-kubelet.conf <<EOF
[Service]
Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/usr/local/kubernetes/cfg/bootstrap-kubelet.kubeconfig --kubeconfig=/usr/local/kubernetes/cfg/kubelet.kubeconfig"
Environment="KUBELET_SYSTEM_ARGS=--network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin"
Environment="KUBELET_CONFIG_ARGS=--config=/usr/local/kubernetes/cfg/kubelet-conf.yml --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/59izt/pause-arm64:3.2"
Environment="KUBELET_EXTRA_ARGS=--node-labels=node.kubernetes.io/node='' "
ExecStart=
ExecStart=/usr/local/kubernetes/bin/kubelet \$KUBELET_KUBECONFIG_ARGS \$KUBELET_CONFIG_ARGS \$KUBELET_SYSTEM_ARGS \$KUBELET_EXTRA_ARGS
EOF
Create the kubelet Configuration File
cat >/usr/local/kubernetes/cfg/kubelet-conf.yml <<EOF
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
address: 0.0.0.0
port: 10250
readOnlyPort: 10255
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 2m0s
    enabled: true
  x509:
    clientCAFile: /usr/local/kubernetes/ssl/ca.pem
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 5m0s
    cacheUnauthorizedTTL: 30s
cgroupDriver: systemd
cgroupsPerQOS: true
clusterDNS:
- 10.96.0.10
clusterDomain: cluster.local
containerLogMaxFiles: 5
containerLogMaxSize: 10Mi
contentType: application/vnd.kubernetes.protobuf
cpuCFSQuota: true
cpuManagerPolicy: none
cpuManagerReconcilePeriod: 10s
enableControllerAttachDetach: true
enableDebuggingHandlers: true
enforceNodeAllocatable:
- pods
eventBurst: 10
eventRecordQPS: 5
evictionHard:
  imagefs.available: 15%
  memory.available: 100Mi
  nodefs.available: 10%
  nodefs.inodesFree: 5%
evictionPressureTransitionPeriod: 5m0s
failSwapOn: true
fileCheckFrequency: 20s
hairpinMode: promiscuous-bridge
healthzBindAddress: 127.0.0.1
healthzPort: 10248
httpCheckFrequency: 20s
imageGCHighThresholdPercent: 85
imageGCLowThresholdPercent: 80
imageMinimumGCAge: 2m0s
iptablesDropBit: 15
iptablesMasqueradeBit: 14
kubeAPIBurst: 10
kubeAPIQPS: 5
makeIPTablesUtilChains: true
maxOpenFiles: 1000000
maxPods: 110
nodeStatusUpdateFrequency: 10s
oomScoreAdj: -999
podPidsLimit: -1
registryBurst: 10
registryPullQPS: 5
resolvConf: /etc/resolv.conf
rotateCertificates: true
runtimeRequestTimeout: 2m0s
serializeImagePulls: true
staticPodPath: /usr/local/kubernetes/manifests
streamingConnectionIdleTimeout: 4h0m0s
syncFrequency: 1m0s
volumeStatsAggPeriod: 1m0s
EOF
Start kubelet and Enable It at Boot
systemctl enable --now kubelet
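If TLS bootstrapping succeeds, the kubelet submits a CSR that the auto-approve bindings created earlier sign automatically; a quick check from the master:
# The CSR should show Approved,Issued; the node registers but stays NotReady until the CNI plugin is installed
kubectl get csr
kubectl get nodes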
Deploy the kube-proxy Service
Install the binaries by copying kube-proxy and its kubeconfig from the master node
scp /usr/local/kubernetes/bin/kube-proxy root@k8s-node01:/usr/local/kubernetes/bin/
scp /usr/local/kubernetes/cfg/kube-proxy.kubeconfig root@k8s-node01:/usr/local/kubernetes/cfg/
Create the kube-proxy systemd Unit
cat >/usr/lib/systemd/system/kube-proxy.service <<EOF
[Unit]
Description=Kubernetes Kube Proxy
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
[Service]
ExecStart=/usr/local/kubernetes/bin/kube-proxy \\
--config=/usr/local/kubernetes/cfg/kube-proxy.conf \\
--v=2
Restart=always
RestartSec=10s
[Install]
WantedBy=multi-user.target
EOF
Create the kube-proxy Configuration File
cat >/usr/local/kubernetes/cfg/kube-proxy.conf <<EOF
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 0.0.0.0
clientConnection:
  acceptContentTypes: ""
  burst: 10
  contentType: application/vnd.kubernetes.protobuf
  kubeconfig: /usr/local/kubernetes/cfg/kube-proxy.kubeconfig
  qps: 5
clusterCIDR: 172.168.0.0/16
configSyncPeriod: 15m0s
conntrack:
  max: null
  maxPerCore: 32768
  min: 131072
  tcpCloseWaitTimeout: 1h0m0s
  tcpEstablishedTimeout: 24h0m0s
enableProfiling: false
healthzBindAddress: 0.0.0.0:10256
hostnameOverride: ""
iptables:
  masqueradeAll: false
  masqueradeBit: 14
  minSyncPeriod: 0s
  syncPeriod: 30s
ipvs:
  masqueradeAll: true
  minSyncPeriod: 5s
  scheduler: "rr"
  syncPeriod: 30s
kind: KubeProxyConfiguration
metricsBindAddress: 127.0.0.1:10249
mode: "ipvs"
nodePortAddresses: null
oomScoreAdj: -999
portRange: ""
udpIdleTimeout: 250ms
EOF
Start kube-proxy and Enable It at Boot
systemctl enable --now kube-proxy.service
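Because mode is set to ipvs, kube-proxy should program IPVS virtual servers rather than iptables chains; this can be confirmed with ipvsadm, installed during server initialization:
# Expect an rr virtual server for the kubernetes service at 10.96.0.1:443
ipvsadm -Ln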
Deploy the Kubernetes Add-ons
Install the Calico Network Plugin
Download the Calico Manifest
curl https://docs.projectcalico.org/archive/v3.15/manifests/calico-etcd.yaml -O
Edit the calico-etcd manifest: add the etcd endpoints and certificates
sed -i 's#etcd_endpoints: "http://<ETCD_IP>:<ETCD_PORT>"#etcd_endpoints: "https://192.168.0.9:2379,https://192.168.0.30:2379,https://192.168.0.78:2379"#g' calico-etcd.yaml
ETCD_CA=`cat /usr/local/etcd/ssl/etcd-ca.pem | base64 | tr -d '\n'`
ETCD_CERT=`cat /usr/local/etcd/ssl/etcd.pem | base64 | tr -d '\n'`
ETCD_KEY=`cat /usr/local/etcd/ssl/etcd-key.pem | base64 | tr -d '\n'`
sed -i "s@# etcd-key: null@etcd-key: ${ETCD_KEY}@g; s@# etcd-cert: null@etcd-cert: ${ETCD_CERT}@g; s@# etcd-ca: null@etcd-ca: ${ETCD_CA}@g" calico-etcd.yaml
sed -i 's#etcd_ca: ""#etcd_ca: "/calico-secrets/etcd-ca"#g; s#etcd_cert: ""#etcd_cert: "/calico-secrets/etcd-cert"#g; s#etcd_key: "" #etcd_key: "/calico-secrets/etcd-key" #g' calico-etcd.yaml
Change the Pod CIDR to 172.168.0.0/16
sed -i 's@# - name: CALICO_IPV4POOL_CIDR@- name: CALICO_IPV4POOL_CIDR@g' calico-etcd.yaml
sed -i 's@# value: "192.168.0.0/16"@ value: "172.168.0.0/16"@g' calico-etcd.yaml
Install Calico
kubectl create -f calico-etcd.yaml
Check the Calico Status
kubectl get pods -n kube-system
Check the Node Status
kubectl get nodes -owide
Install the CoreDNS Plugin
Download the latest CoreDNS deployment scripts
git clone https://github.com/coredns/deployment.git
Install CoreDNS
cd deployment/kubernetes
./deploy.sh -s -i 10.96.0.10 | kubectl apply -f -
Check the CoreDNS Status
kubectl get pods -n kube-system -l k8s-app=kube-dns
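To confirm cluster DNS end to end, resolve a service name from a disposable pod; a sketch using busybox:1.28, whose nslookup is known to behave (substitute an internal arm64 mirror if nodes cannot reach Docker Hub):
kubectl run dns-test --rm -it --image=busybox:1.28 --restart=Never -- nslookup kubernetes.default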
Install the metrics-server Plugin
Download the metrics-server Manifest
wget https://github.com/kubernetes-sigs/metrics-server/releases/download/metrics-server-helm-chart-3.8.2/components.yaml
Edit the downloaded file; the main changes are annotated below:
spec:
  # 0. Add node affinity so metrics-server is scheduled onto the master node
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
        - matchExpressions:
          - key: node-role.kubernetes.io/edge
            operator: DoesNotExist
          - key: node-role.kubernetes.io/agent
            operator: DoesNotExist
          - key: kubernetes.io/hostname
            operator: In
            values:
            - k8s-master01
  containers:
  - args:
    - --cert-dir=/tmp
    - --secure-port=4443    # 1. change the secure port to 4443
    - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
    - --kubelet-use-node-status-port
    - --metric-resolution=15s
    - --kubelet-insecure-tls    # 2. add the following flags
    - --kubelet-preferred-address-types=InternalIP,Hostname,InternalDNS,ExternalDNS,ExternalIP
    - --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem # change to front-proxy-ca.crt for kubeadm
    - --requestheader-username-headers=X-Remote-User
    - --requestheader-group-headers=X-Remote-Group
    - --requestheader-extra-headers-prefix=X-Remote-Extra-
    image: registry.cn-hangzhou.aliyuncs.com/59izt/metrics-server-kubeedge:latest # 3. use the image synced to your Aliyun registry
    ...
    ports:
    - containerPort: 4443    # 4. change the exposed container port to 4443
      name: https
      protocol: TCP
    ...
    volumeMounts:
    - mountPath: /tmp
      name: tmp-dir
    - mountPath: /etc/kubernetes/pki    # 5. mount the certificate volume into the container
      name: ca-ssl
  nodeSelector:
    kubernetes.io/os: linux
  ...
  volumes:
  - emptyDir: {}
    name: tmp-dir
  - name: ca-ssl    # 6. expose the certificates through a hostPath volume
    hostPath:
      path: /usr/local/kubernetes/ssl
Install metrics-server
# Relax permissions on the kubernetes install directory, otherwise metrics-server fails to start (Kylin's default file mode is 0600)
chmod 755 -R /usr/local/kubernetes
# Create metrics-server
kubectl create -f components.yaml
Check the metrics-server Status
kubectl get pod -n kube-system -l k8s-app=metrics-server -owide
Query Resource Metrics
# kubectl top node
NAME CPU(cores) CPU% MEMORY(bytes) MEMORY%
k8s-master01 114m 1% 2767Mi 8%
k8s-node01 60m 0% 1410Mi 4%
k8s-node02 56m 0% 1436Mi 4%
# kubectl top pods -A
NAMESPACE NAME CPU(cores) MEMORY(bytes)
kube-system calico-kube-controllers-75864f986d-xp5dr 2m 15Mi
kube-system calico-node-7gfqg 17m 50Mi
kube-system calico-node-mlqc8 16m 49Mi
kube-system calico-node-wc4r2 14m 50Mi
kube-system coredns-7d466475c7-g8hkk 1m 15Mi
kube-system metrics-server-7b7fcb774f-n2szd 2m 19Mi
Install Ingress-Nginx
Install Helm
# Download Helm
wget https://get.helm.sh/helm-v3.5.3-linux-arm64.tar.gz
# Unpack the archive and copy the binary to /usr/local/bin
tar zxvf helm-v3.5.3-linux-arm64.tar.gz
mv linux-arm64/helm /usr/local/bin/
# Verify the helm installation
helm help
Install Ingress-Nginx
# Add the ingress-nginx repo
helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
# Pull the ingress-nginx chart
helm pull ingress-nginx/ingress-nginx
# Unpack the downloaded chart; the values to change are listed below (see the values-file sketch after this block)
- the Controller and admissionWebhook image addresses: sync the public images to the internal registry first;
- set hostNetwork to true;
- set dnsPolicy to ClusterFirstWithHostNet;
- add ingress: "true" to the NodeSelector, to pin the controller to labeled nodes;
- change the resource type to kind: DaemonSet;
- change the service type from type: LoadBalancer to type: ClusterIP
# Label the node(s) that should run ingress
kubectl label nodes k8s-node01 ingress=true
# Create the ingress-nginx namespace
kubectl create ns ingress-nginx
# Deploy ingress
helm install ingress-nginx -n ingress-nginx .
# Check the ingress installation
kubectl get pods -n ingress-nginx -owide
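As an alternative to editing the unpacked chart in place, the changes listed above can be captured in a values file; a sketch, with registry.example.com standing in for your internal mirror (exact value keys can vary between chart versions, so check the chart's values.yaml):
cat > ingress-values.yaml <<EOF
controller:
  image:
    registry: registry.example.com   # hypothetical internal mirror
  hostNetwork: true
  dnsPolicy: ClusterFirstWithHostNet
  kind: DaemonSet
  nodeSelector:
    ingress: "true"
  service:
    type: ClusterIP
EOF
helm install ingress-nginx -n ingress-nginx . -f ingress-values.yaml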
Install kubernetes-dashboard
Download the kubernetes-dashboard Manifest
wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.7.0/aio/deploy/recommended.yaml
Install kubernetes-dashboard
kubectl create -f recommended.yaml
Check the kubernetes-dashboard Status
kubectl get pods -n kubernetes-dashboard
Create the admin User
cat <<EOF | kubectl apply -f - -n kube-system
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kube-system
EOF
Change the Service Type to NodePort
kubectl edit svc -n kubernetes-dashboard kubernetes-dashboard
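The same change can be made non-interactively with kubectl patch:
kubectl -n kubernetes-dashboard patch svc kubernetes-dashboard -p '{"spec":{"type":"NodePort"}}'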
Get the admin User's Token
kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')
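Finally, read back the allocated NodePort and open https://<any-node-public-IP>:<nodePort> in a browser, signing in with the token above:
kubectl -n kubernetes-dashboard get svc kubernetes-dashboard -o jsonpath='{.spec.ports[0].nodePort}'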