How to install a Kubernetes cluster

2020-08-15 17:58:55
Environment and versions

OS: CentOS Linux release 7.8.2003 (Core)
etcd: etcd-v3.4.7
k8s: v1.18.6
Calico: v3.15.1
docker: docker-ce-19.03
Load balancer: Alibaba Cloud SLB is generally recommended in production; nginx can stand in for it in a test environment
service cluster IP range: 10.10.0.0/16
pod IP range: 10.20.0.0/16
cluster DNS: 10.10.0.2
kubernetes service IP: 10.10.0.1
The cluster uses the default domain svc.cluster.local

[root@master01 ~]# kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 10.10.0.1 <none> 443/TCP 33h

The master components and etcd run on the same hosts.

master01   192.168.1.101    kube-apiserver kube-controller-manager kube-scheduler
master02 192.168.1.102 kube-apiserver kube-controller-manager kube-scheduler
master03 192.168.1.103 kube-apiserver kube-controller-manager kube-scheduler
slb    192.168.1.31 nginx
node1 192.168.1.104 kubelet kube-proxy calico
node2 192.168.1.105 kubelet kube-proxy calico
node3 192.168.1.106 kubelet kube-proxy calico
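
For convenience, the host names above can be resolved locally on every machine. A minimal sketch, assuming the LB domain lb.abc.com.cn used later in this post should point at the nginx host:

cat >> /etc/hosts <<EOF
192.168.1.101 master01
192.168.1.102 master02
192.168.1.103 master03
192.168.1.31  slb lb.abc.com.cn
192.168.1.104 node01
192.168.1.105 node02
192.168.1.106 node03
EOF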

Initialization

Upgrade the kernel

rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm
yum --enablerepo=elrepo-kernel install -y kernel-lt
# List the available kernels
cat /boot/grub2/grub.cfg |grep menuentry
# Make the new kernel the default boot entry
grub2-set-default "CentOS Linux (4.4.232-1.el7.elrepo.x86_64) 7 (Core)"
# Check the default boot entry
grub2-editenv list
# Reboot so the new kernel takes effect
reboot
# Verify the running kernel version
uname -r

Initialize the system

systemctl stop firewalld
systemctl disable firewalld
swapoff -a
sed -i 's/.*swap.*/#&/' /etc/fstab
setenforce  0 
sed -i "s/^SELINUX=enforcing/SELINUX=disabled/g" /etc/sysconfig/selinux 
sed -i "s/^SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config
sed -i "s/^SELINUX=permissive/SELINUX=disabled/g" /etc/sysconfig/selinux
sed -i "s/^SELINUX=permissive/SELINUX=disabled/g" /etc/selinux/config

# Pass bridged IPv4 traffic to the iptables chains
cat>/etc/sysctl.d/k8s.conf<< EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv6.conf.all.disable_ipv6=1
net.ipv6.conf.default.disable_ipv6=1
net.ipv6.conf.lo.disable_ipv6=1
net.ipv6.conf.all.forwarding=1
net.ipv4.ip_forward=1
vm.swappiness=0
EOF
sysctl --system # apply the settings

yum install -y yum-utils
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
yum install -y docker-ce docker-ce-cli containerd.io
systemctl enable docker && systemctl start docker
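
Optionally, configure the Docker daemon before moving on. This is only a sketch of commonly used log-rotation settings (not something this post requires); note that if you change the cgroup driver here, the kubelet's cgroup driver must be changed to match, so this sketch leaves it at the default:

cat > /etc/docker/daemon.json <<EOF
{
  "log-driver": "json-file",
  "log-opts": { "max-size": "100m", "max-file": "3" }
}
EOF
systemctl restart docker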

Deploy the etcd cluster

Before deploying, generate the self-signed certificates (see the previous post).
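
As a reminder, here is a hedged sketch of that cfssl flow. The file names (ca-csr.json, ca-config.json, server-csr.json) and the profile name kubernetes are assumptions based on how the certificates are referenced later; the server certificate's hosts list must include the master IPs, 127.0.0.1, the load-balancer address, and the cluster service IP 10.10.0.1:

# Sketch only: generate the CA and the server certificate shared by etcd and kube-apiserver
cfssl gencert -initca ca-csr.json | cfssljson -bare ca
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json \
  -profile=kubernetes server-csr.json | cfssljson -bare server
cp ca.pem ca-key.pem server.pem server-key.pem /opt/kubernetes/ssl/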

mkdir /data/etcd/ -p
mkdir /opt/kubernetes/{bin,cfg,ssl} -p
cd /data/etcd/
wget https://github.com/etcd-io/etcd/releases/download/v3.4.7/etcd-v3.4.7-linux-amd64.tar.gz
tar zxvf etcd-v3.4.7-linux-amd64.tar.gz
cd etcd-v3.4.7-linux-amd64
cp -a etcd etcdctl /opt/kubernetes/bin/
echo 'export PATH=$PATH:/opt/kubernetes/bin' >> /etc/profile
source /etc/profile

Distribute the certificates, then run the following script on master01, master02, and master03 in turn, replacing the IPs as appropriate.

[root@master01 etcd]# cat etcd.sh 
#!/bin/bash
ETCD_NAME=${1:-"etcd01"}
ETCD_IP=${2:-"127.0.0.1"}
ETCD_CLUSTER=${3:-"etcd01=https://127.0.0.1:2380"}

cat<<EOF>/opt/kubernetes/cfg/etcd.yml
name: ${ETCD_NAME}
data-dir: /var/lib/etcd/default.etcd
listen-peer-urls: https://${ETCD_IP}:2380
listen-client-urls: https://${ETCD_IP}:2379,https://127.0.0.1:2379
advertise-client-urls: https://${ETCD_IP}:2379
initial-advertise-peer-urls: https://${ETCD_IP}:2380
initial-cluster: ${ETCD_CLUSTER}
initial-cluster-token: etcd-cluster
initial-cluster-state: new
client-transport-security:
  cert-file: /opt/kubernetes/ssl/server.pem
  key-file: /opt/kubernetes/ssl/server-key.pem
  client-cert-auth: false
  trusted-ca-file: /opt/kubernetes/ssl/ca.pem
  auto-tls: false
peer-transport-security:
  cert-file: /opt/kubernetes/ssl/server.pem
  key-file: /opt/kubernetes/ssl/server-key.pem
  client-cert-auth: false
  trusted-ca-file: /opt/kubernetes/ssl/ca.pem
  auto-tls: false
debug: false
logger: zap
log-outputs: [stderr]
EOF

cat<<EOF>/usr/lib/systemd/system/etcd.service
[Unit]
Description=Etcd Server
Documentation=https://github.com/etcd-io/etcd
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=notify
LimitNOFILE=65536
Restart=on-failure
RestartSec=5s
TimeoutStartSec=0
ExecStart=/opt/kubernetes/bin/etcd --config-file=/opt/kubernetes/cfg/etcd.yml

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable etcd
systemctl restart etcd

#master01
./etcd.sh etcd01 192.168.1.101 etcd01=https://192.168.1.101:2380,etcd02=https://192.168.1.102:2380,etcd03=https://192.168.1.103:2380

#master02
./etcd.sh etcd02 192.168.1.102 etcd01=https://192.168.1.101:2380,etcd02=https://192.168.1.102:2380,etcd03=https://192.168.1.103:2380

#master03
./etcd.sh etcd03 192.168.1.103 etcd01=https://192.168.1.101:2380,etcd02=https://192.168.1.102:2380,etcd03=https://192.168.1.103:2380


# Check the cluster status
[root@master01 etcd]# /opt/kubernetes/bin/etcdctl --cacert=/opt/kubernetes/ssl/ca.pem --cert=/opt/kubernetes/ssl/server.pem --key=/opt/kubernetes/ssl/server-key.pem --endpoints=https://192.168.1.101:2379,https://192.168.1.102:2379,https://192.168.1.103:2379 endpoint status
https://192.168.1.101:2379, f592173020929e13, 3.4.7, 1.8 MB, false, false, 19, 53478, 53478,
https://192.168.1.102:2379, 4eb9518f73eaf60f, 3.4.7, 1.8 MB, false, false, 19, 53478, 53478,
https://192.168.1.103:2379, 2123e126fcaeb456, 3.4.7, 1.8 MB, true, false, 19, 53478, 53478,

# Write the key foo
/opt/kubernetes/bin/etcdctl --cacert=/opt/kubernetes/ssl/ca.pem --cert=/opt/kubernetes/ssl/server.pem --key=/opt/kubernetes/ssl/server-key.pem --endpoints=https://192.168.1.101:2379,https://192.168.1.102:2379,https://192.168.1.103:2379 put foo "Hello World"
# Read the key foo back
[root@master02 ~]# /opt/kubernetes/bin/etcdctl --cacert=/opt/kubernetes/ssl/ca.pem --cert=/opt/kubernetes/ssl/server.pem --key=/opt/kubernetes/ssl/server-key.pem --endpoints=https://192.168.1.101:2379,https://192.168.1.102:2379,https://192.168.1.103:2379 get foo
foo
Hello World
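
The same etcdctl flags also work with member list and endpoint health for a quick sanity check of membership and quorum:

/opt/kubernetes/bin/etcdctl --cacert=/opt/kubernetes/ssl/ca.pem --cert=/opt/kubernetes/ssl/server.pem --key=/opt/kubernetes/ssl/server-key.pem --endpoints=https://192.168.1.101:2379,https://192.168.1.102:2379,https://192.168.1.103:2379 member list -w table
/opt/kubernetes/bin/etcdctl --cacert=/opt/kubernetes/ssl/ca.pem --cert=/opt/kubernetes/ssl/server.pem --key=/opt/kubernetes/ssl/server-key.pem --endpoints=https://192.168.1.101:2379,https://192.168.1.102:2379,https://192.168.1.103:2379 endpoint health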

Install Kubernetes

Kubernetes binary packages

mkdir /data/k8s-package
cd /data/k8s-package
wget https://dl.k8s.io/v1.18.6/kubernetes-server-linux-amd64.tar.gz
tar xf kubernetes-server-linux-amd64.tar.gz
cd /data/k8s-package/kubernetes/server/bin
cp -a kube-apiserver kube-controller-manager kube-scheduler kubectl kubelet kube-proxy /opt/kubernetes/bin

# Copy the binaries to /opt/kubernetes/bin on master02 and master03
scp kube-apiserver kube-controller-manager kube-scheduler kubectl kubelet kube-proxy root@master02:/opt/kubernetes/bin/
scp kube-apiserver kube-controller-manager kube-scheduler kubectl kubelet kube-proxy root@master03:/opt/kubernetes/bin/

Create the kubeconfig files for the node components

[root@master01 ~]# cat /data/ssl/kubeconfig.sh 
export BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
cat>token.csv<<EOF
${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF

export KUBE_APISERVER="https://lb.abc.com.cn:6443"

kubectl config set-cluster kubernetes \
--certificate-authority=./ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=bootstrap.kubeconfig

kubectl config set-credentials kubelet-bootstrap \
--token=${BOOTSTRAP_TOKEN} \
--kubeconfig=bootstrap.kubeconfig

kubectl config set-context default \
--cluster=kubernetes \
--user=kubelet-bootstrap \
--kubeconfig=bootstrap.kubeconfig

kubectl config use-context default --kubeconfig=bootstrap.kubeconfig



kubectl -n kube-system create serviceaccount kube-proxy
kubectl create clusterrolebinding system:kube-proxy --clusterrole system:node-proxier --serviceaccount kube-system:kube-proxy

kubectl config set-cluster kubernetes \
--certificate-authority=./ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=kube-proxy.kubeconfig

kubectl config set-credentials kube-proxy \
--client-certificate=./kube-proxy.pem \
--client-key=./kube-proxy-key.pem \
--embed-certs=true \
--kubeconfig=kube-proxy.kubeconfig

kubectl config set-context default \
--cluster=kubernetes \
--user=kube-proxy \
--kubeconfig=kube-proxy.kubeconfig

kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig

#sh kubeconfig.sh
#cp *kubeconfig /opt/kubernetes/cfg
#scp *kubeconfig root@master02:/opt/kubernetes/cfg
#scp *kubeconfig root@master03:/opt/kubernetes/cfg
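
A quick sanity check that both kubeconfig files point at the load balancer and embed the expected credentials:

kubectl config view --kubeconfig=bootstrap.kubeconfig
kubectl config view --kubeconfig=kube-proxy.kubeconfig
cat token.csv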

Configure the master components

mkdir /data/k8s-master
[root@master01 k8s-master]# cat apiserver.sh
#!/bin/bash

MASTER_ADDRESS=${1:-"192.168.1.101"}
ETCD_SERVERS=${2:-"https://127.0.0.1:2379"}

cat<<EOF>/opt/kubernetes/cfg/kube-apiserver
KUBE_APISERVER_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/var/log/kubernetes \\
--etcd-servers=${ETCD_SERVERS} \\
--bind-address=0.0.0.0 \\
--secure-port=6443 \\
--advertise-address=${MASTER_ADDRESS} \\
--allow-privileged=true \\
--service-cluster-ip-range=10.10.0.0/16 \\
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction \\
--authorization-mode=Node,RBAC \\
--kubelet-https=true \\
--enable-bootstrap-token-auth=true \\
--token-auth-file=/opt/kubernetes/cfg/token.csv \\
--service-node-port-range=30000-50000 \\
--kubelet-client-certificate=/opt/kubernetes/ssl/server.pem \\
--kubelet-client-key=/opt/kubernetes/ssl/server-key.pem \\
--tls-cert-file=/opt/kubernetes/ssl/server.pem \\
--tls-private-key-file=/opt/kubernetes/ssl/server-key.pem \\
--client-ca-file=/opt/kubernetes/ssl/ca.pem \\
--service-account-key-file=/opt/kubernetes/ssl/ca-key.pem \\
--etcd-cafile=/opt/kubernetes/ssl/ca.pem \\
--etcd-certfile=/opt/kubernetes/ssl/server.pem \\
--etcd-keyfile=/opt/kubernetes/ssl/server-key.pem \\
--requestheader-client-ca-file=/opt/kubernetes/ssl/ca.pem \\
--requestheader-extra-headers-prefix=X-Remote-Extra- \\
--requestheader-group-headers=X-Remote-Group \\
--requestheader-username-headers=X-Remote-User \\
--proxy-client-cert-file=/opt/kubernetes/ssl/metrics-server.pem \\
--proxy-client-key-file=/opt/kubernetes/ssl/metrics-server-key.pem \\
--runtime-config=api/all=true \\
--audit-log-maxage=30 \\
--audit-log-maxbackup=3 \\
--audit-log-maxsize=100 \\
--audit-log-truncate-enabled=true \\
--audit-log-path=/var/log/kubernetes/k8s-audit.log"
EOF


cat<<EOF>/usr/lib/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-apiserver
ExecStart=/opt/kubernetes/bin/kube-apiserver \$KUBE_APISERVER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF


systemctl daemon-reload
systemctl enable kube-apiserver
systemctl restart kube-apiserver



[root@master01 k8s-master]# cat controller-manager.sh
#!/bin/bash

MASTER_ADDRESS=${1:-"127.0.0.1"}

cat<<EOF>/opt/kubernetes/cfg/kube-controller-manager

KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=true \\
--v=2 \\
--master=${MASTER_ADDRESS}:8080 \\
--leader-elect=true \\
--bind-address=0.0.0.0 \\
--service-cluster-ip-range=10.10.0.0/16 \\
--cluster-name=kubernetes \\
--cluster-signing-cert-file=/opt/kubernetes/ssl/ca.pem \\
--cluster-signing-key-file=/opt/kubernetes/ssl/ca-key.pem \\
--service-account-private-key-file=/opt/kubernetes/ssl/ca-key.pem \\
--experimental-cluster-signing-duration=87600h0m0s \\
--feature-gates=RotateKubeletServerCertificate=true \\
--feature-gates=RotateKubeletClientCertificate=true \\
--allocate-node-cidrs=true \\
--cluster-cidr=10.20.0.0/16 \\
--root-ca-file=/opt/kubernetes/ssl/ca.pem"
EOF

cat<<EOF>/usr/lib/systemd/system/kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-controller-manager
ExecStart=/opt/kubernetes/bin/kube-controller-manager \$KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable kube-controller-manager
systemctl restart kube-controller-manager


[root@master01 k8s-master]# cat scheduler.sh
#!/bin/bash

MASTER_ADDRESS=${1:-"127.0.0.1"}

cat<<EOF>/opt/kubernetes/cfg/kube-scheduler
KUBE_SCHEDULER_OPTS="--logtostderr=true \\
--v=2 \\
--master=${MASTER_ADDRESS}:8080 \\
--address=0.0.0.0 \\
--leader-elect"
EOF

cat<<EOF>/usr/lib/systemd/system/kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-scheduler
ExecStart=/opt/kubernetes/bin/kube-scheduler \$KUBE_SCHEDULER_OPTS
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable kube-scheduler
systemctl restart kube-scheduler

# Distribute token.csv, then run the scripts on master01, master02, and master03 in turn
#master01
./apiserver.sh 192.168.1.101 https://192.168.1.101:2379,https://192.168.1.102:2379,https://192.168.1.103:2379
./controller-manager.sh 127.0.0.1
./scheduler.sh 127.0.0.1

#master02
./apiserver.sh 192.168.1.102 https://192.168.1.101:2379,https://192.168.1.102:2379,https://192.168.1.103:2379
./controller-manager.sh 127.0.0.1
./scheduler.sh 127.0.0.1

#master03
./apiserver.sh 192.168.1.103 https://192.168.1.101:2379,https://192.168.1.102:2379,https://192.168.1.103:2379
./controller-manager.sh 127.0.0.1
./scheduler.sh 127.0.0.1

[root@master01 k8s-master]# ps -ef | grep kube
root 4148 1 3 17:50 ? 00:01:24 /opt/kubernetes/bin/etcd --config-file=/opt/kubernetes/cfg/etcd.yml
root 4174 1 3 17:50 ? 00:01:22 /opt/kubernetes/bin/kube-apiserver --logtostderr=false --v=2 --log-dir=/var/log/kubernetes --etcd-servers=https://192.168.1.101:2379,https://192.168.1.102:2379,https://192.168.1.103:2379 --bind-address=0.0.0.0 --secure-port=6443 --advertise-address=192.168.1.101 --allow-privileged=true --service-cluster-ip-range=10.10.0.0/16 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction --authorization-mode=RBAC,Node --kubelet-https=true --enable-bootstrap-token-auth=true --token-auth-file=/opt/kubernetes/cfg/token.csv --service-node-port-range=30000-50000 --kubelet-client-certificate=/opt/kubernetes/ssl/server.pem --kubelet-client-key=/opt/kubernetes/ssl/server-key.pem --tls-cert-file=/opt/kubernetes/ssl/server.pem --tls-private-key-file=/opt/kubernetes/ssl/server-key.pem --client-ca-file=/opt/kubernetes/ssl/ca.pem --service-account-key-file=/opt/kubernetes/ssl/ca-key.pem --etcd-cafile=/opt/kubernetes/ssl/ca.pem --etcd-certfile=/opt/kubernetes/ssl/server.pem --etcd-keyfile=/opt/kubernetes/ssl/server-key.pem --requestheader-client-ca-file=/opt/kubernetes/ssl/ca.pem --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --proxy-client-cert-file=/opt/kubernetes/ssl/metrics-server.pem --proxy-client-key-file=/opt/kubernetes/ssl/metrics-server-key.pem --runtime-config=api/all=true --audit-log-maxage=30 --audit-log-maxbackup=3 --audit-log-maxsize=100 --audit-log-truncate-enabled=true --audit-log-path=/var/log/kubernetes/k8s-audit.log
root 4281 1 0 17:52 ? 00:00:04 /opt/kubernetes/bin/kube-controller-manager --logtostderr=true --v=2 --master=127.0.0.1:8080 --leader-elect=true --bind-address=0.0.0.0 --service-cluster-ip-range=10.10.0.0/16 --cluster-name=kubernetes --cluster-signing-cert-file=/opt/kubernetes/ssl/ca.pem --cluster-signing-key-file=/opt/kubernetes/ssl/ca-key.pem --service-account-private-key-file=/opt/kubernetes/ssl/ca-key.pem --experimental-cluster-signing-duration=87600h0m0s --feature-gates=RotateKubeletServerCertificate=true --feature-gates=RotateKubeletClientCertificate=true --allocate-node-cidrs=true --cluster-cidr=10.20.0.0/16 --root-ca-file=/opt/kubernetes/ssl/ca.pem
root 4302 1 0 17:52 ? 00:00:05 /opt/kubernetes/bin/kube-scheduler --logtostderr=true --v=2 --master=127.0.0.1:8080 --address=0.0.0.0 --leader-elect

# Inspect the keys written to etcd, useful for troubleshooting
/opt/kubernetes/bin/etcdctl --cacert=/opt/kubernetes/ssl/ca.pem --cert=/opt/kubernetes/ssl/server.pem --key=/opt/kubernetes/ssl/server-key.pem --endpoints=https://192.168.1.101:2379,https://192.168.1.102:2379,https://192.168.1.103:2379 get /registry/ --prefix --keys-only


[root@master01 k8s-master]# kubectl get cs
NAME STATUS MESSAGE ERROR
scheduler Healthy ok
controller-manager Healthy ok
etcd-1 Healthy {"health":"true"}
etcd-2 Healthy {"health":"true"}
etcd-0 Healthy {"health":"true"}

[root@master01 k8s-master]# kubectl cluster-info
Kubernetes master is running at http://localhost:8080

To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.
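
Here kubectl talks to the local insecure port (8080). If you prefer to go through the load balancer instead, you can build an admin kubeconfig. This is only a sketch: it assumes an admin client certificate (admin.pem / admin-key.pem, bound to cluster-admin or issued in the system:masters group) signed by the same CA, which is not generated in this post:

kubectl config set-cluster kubernetes \
  --certificate-authority=/opt/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=https://lb.abc.com.cn:6443 \
  --kubeconfig=admin.kubeconfig
kubectl config set-credentials admin \
  --client-certificate=admin.pem \
  --client-key=admin-key.pem \
  --embed-certs=true \
  --kubeconfig=admin.kubeconfig
kubectl config set-context default --cluster=kubernetes --user=admin --kubeconfig=admin.kubeconfig
kubectl config use-context default --kubeconfig=admin.kubeconfig
mkdir -p ~/.kube && cp admin.kubeconfig ~/.kube/config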

nginx configuration

Enable TCP (stream) proxying

stream {
    log_format proxy '$remote_addr [$time_local] '
                     '$protocol $status $bytes_sent $bytes_received '
                     '$session_time "$upstream_addr" '
                     '"$upstream_bytes_sent" "$upstream_bytes_received" "$upstream_connect_time"';
    upstream k8s-apiserver {
        hash $remote_addr consistent;
        server 192.168.1.101:6443;
        server 192.168.1.102:6443;
        server 192.168.1.103:6443;
    }
    server {
        listen 6443;
        proxy_pass k8s-apiserver;
        access_log logs/apiserver.log proxy;
    }
}
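
After reloading nginx, a quick way to confirm the stream proxy forwards to the apiservers is to hit the /version endpoint through the load balancer (assuming lb.abc.com.cn resolves to 192.168.1.31; -k skips verification of the self-signed certificate). Even a 401/403 response would prove the TCP path works:

curl -k https://lb.abc.com.cn:6443/version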

Automatic kubelet certificate renewal and the node bootstrap user

Authorize the kubelet-bootstrap user for node bootstrapping

kubectl create clusterrolebinding  kubelet-bootstrap --clusterrole=system:node-bootstrapper  --user=kubelet-bootstrap

Grant the ability to automatically approve selfnodeserver CSR requests

[root@master01 kubelet-certificate-rotating]# cat tls-instructs-csr.yaml 
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: system:certificates.k8s.io:certificatesigningrequests:selfnodeserver
rules:
- apiGroups: ["certificates.k8s.io"]
  resources: ["certificatesigningrequests/selfnodeserver"]
  verbs: ["create"]


kubectl apply -f tls-instructs-csr.yaml

# Automatically approve the first CSR that the kubelet-bootstrap user submits during TLS bootstrapping
kubectl create clusterrolebinding node-client-auto-approve-csr --clusterrole=system:certificates.k8s.io:certificatesigningrequests:nodeclient --user=kubelet-bootstrap

# Automatically approve CSRs from the system:nodes group for renewing the kubelet client certificate used to talk to the apiserver
kubectl create clusterrolebinding node-client-auto-renew-crt --clusterrole=system:certificates.k8s.io:certificatesigningrequests:selfnodeclient --group=system:nodes


# Automatically approve CSRs from the system:nodes group for renewing the kubelet serving certificate for the 10250 API port
kubectl create clusterrolebinding node-server-auto-renew-crt --clusterrole=system:certificates.k8s.io:certificatesigningrequests:selfnodeserver --group=system:nodes


[root@master01 kubelet-certificate-rotating]# kubectl get clusterrolebinding|egrep "node-(.*)-auto"
node-client-auto-approve-csr ClusterRole/system:certificates.k8s.io:certificatesigningrequests:nodeclient 2m4s
node-client-auto-renew-crt ClusterRole/system:certificates.k8s.io:certificatesigningrequests:selfnodeclient 109s
node-server-auto-renew-crt ClusterRole/system:certificates.k8s.io:certificatesigningrequests:selfnodeserver 93s

Configure and run the node components
mkdir /data/k8s-node
cd /data/k8s-node/

[root@node01 k8s-node]# cat kubelet.sh
#!/bin/bash
#create static pod directory
mkdir -p /etc/kubernetes/manifests

DNS_SERVER_IP=${1:-"10.10.0.2"}
HOSTNAME=${2:-"`hostname`"}
CLUSTERDOMAIN=${3:-"cluster.local"}
cat<<EOF>/opt/kubernetes/cfg/kubelet.conf
KUBELET_OPTS="--logtostderr=false \\
--v=2 \\
--hostname-override=${HOSTNAME} \\
--kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig \\
--bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig \\
--config=/opt/kubernetes/cfg/kubelet-config.yml \\
--cert-dir=/opt/kubernetes/ssl \\
--network-plugin=cni \\
--cni-conf-dir=/etc/cni/net.d \\
--cni-bin-dir=/opt/cni/bin \\
--pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.2"
EOF

cat<<EOF>/opt/kubernetes/cfg/kubelet-config.yml
apiVersion: kubelet.config.k8s.io/v1beta1
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 0s
    enabled: true
  x509:
    clientCAFile: /opt/kubernetes/ssl/ca.pem
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 0s
    cacheUnauthorizedTTL: 0s
clusterDNS:
- ${DNS_SERVER_IP}
clusterDomain: ${CLUSTERDOMAIN}
cpuManagerReconcilePeriod: 0s
evictionPressureTransitionPeriod: 0s
fileCheckFrequency: 0s
healthzBindAddress: 127.0.0.1
healthzPort: 10248
httpCheckFrequency: 0s
imageMinimumGCAge: 0s
kind: KubeletConfiguration
nodeStatusReportFrequency: 0s
nodeStatusUpdateFrequency: 0s
rotateCertificates: true
runtimeRequestTimeout: 0s
staticPodPath: /etc/kubernetes/manifests
streamingConnectionIdleTimeout: 0s
syncFrequency: 0s
volumeStatsAggPeriod: 0s
featureGates:
  RotateKubeletServerCertificate: true
  RotateKubeletClientCertificate: true
EOF

cat<<EOF>/usr/lib/systemd/system/kubelet.service
[Unit]
Description=Kubernetes Kubelet
After=docker.service
Requires=docker.service
[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kubelet.conf
ExecStart=/opt/kubernetes/bin/kubelet \$KUBELET_OPTS
Restart=on-failure
KillMode=process
RestartSec=10
[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable kubelet
systemctl restart kubelet



[root@node01 k8s-node]# cat proxy.sh
#!/bin/bash

HOSTNAME=${1:-"`hostname`"}

cat<<EOF>/opt/kubernetes/cfg/kube-proxy.conf
KUBE_PROXY_OPTS="--logtostderr=true \\
--v=2 \\
--config=/opt/kubernetes/cfg/kube-proxy-config.yml"
EOF

cat<<EOF>/opt/kubernetes/cfg/kube-proxy-config.yml
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 0.0.0.0
clientConnection:
  acceptContentTypes: ""
  burst: 0
  contentType: ""
  kubeconfig: /opt/kubernetes/cfg/kube-proxy.kubeconfig
  qps: 0
clusterCIDR: 10.20.0.0/16
configSyncPeriod: 0s
conntrack:
  maxPerCore: null
  min: null
  tcpCloseWaitTimeout: null
  tcpEstablishedTimeout: null
enableProfiling: false
healthzBindAddress: ""
hostnameOverride: ${HOSTNAME}
iptables:
  masqueradeAll: false
  masqueradeBit: null
  minSyncPeriod: 0s
  syncPeriod: 0s
ipvs:
  excludeCIDRs: null
  minSyncPeriod: 0s
  scheduler: ""
  strictARP: false
  syncPeriod: 0s
kind: KubeProxyConfiguration
metricsBindAddress: ""
mode: "ipvs"
nodePortAddresses: null
oomScoreAdj: null
portRange: ""
udpIdleTimeout: 0s
winkernel:
  enableDSR: false
  networkName: ""
  sourceVip: ""
EOF

cat<<EOF>/usr/lib/systemd/system/kube-proxy.service
[Unit]
Description=Kubernetes Proxy
After=network.target
[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-proxy.conf
ExecStart=/opt/kubernetes/bin/kube-proxy \$KUBE_PROXY_OPTS
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable kube-proxy
systemctl restart kube-proxy

#node01
./kubelet.sh 10.10.0.2 node01 cluster.local
./proxy.sh
#node02
./kubelet.sh 10.10.0.2 node02 cluster.local
./proxy.sh
#node03
./kubelet.sh 10.10.0.2 node03 cluster.local
./proxy.sh

# Check from any master
[root@master01 k8s-node]# kubectl get node
NAME STATUS ROLES AGE VERSION
node01 NotReady <none> 13h v1.18.6
node02 NotReady <none> 27m v1.18.6
node03 NotReady <none> 27m v1.18.6

The nodes are in NotReady state because the network plugin has not been installed yet.
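
Before moving on to the network plugin, you can also confirm that the bootstrap CSRs were auto-approved by the bindings created earlier (the kubelet keeps its rotated client certificates under the --cert-dir configured above):

kubectl get csr
# Each entry should show Approved,Issued
ls /opt/kubernetes/ssl/ | grep kubelet   # run on a node; expect kubelet-client-current.pem and friends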

Fixing the inability to fetch pod logs

[root@master01 yaml]# cat rbac/apiserver-to-kubelet-rbac.yml 
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kubelet-api-admin
subjects:
- kind: User
  name: kubernetes
  apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: ClusterRole
  name: system:kubelet-api-admin
  apiGroup: rbac.authorization.k8s.io

kubectl apply -f ~/yaml/rbac/apiserver-to-kubelet-rbac.yml

Fixing RBAC DENY errors

I0905 16:15:47.988359   22403 rbac.go:119] RBAC DENY: user "system:node:node01" groups ["system:nodes" "system:authenticated"] cannot "update" resource "leases.coordination.k8s.io" named "node01" in namespace "kube-node-lease"
Fix 1
[root@master01 kubernetes]# kubectl describe clusterrolebindings system:node
Name: system:node
Labels: kubernetes.io/bootstrapping=rbac-defaults
Annotations: rbac.authorization.kubernetes.io/autoupdate: true
Role:
Kind: ClusterRole
Name: system:node
Subjects:
Kind Name Namespace
---- ---- ---------

[root@master01 metrics]# kubectl create clusterrolebinding kubelet-node-clusterbinding --clusterrole=system:node --group=system:nodes
clusterrolebinding.rbac.authorization.k8s.io/kubelet-node-clusterbinding created
[root@master01 metrics]# kubectl describe clusterrolebindings kubelet-node-clusterbinding
Name: kubelet-node-clusterbinding
Labels: <none>
Annotations: <none>
Role:
Kind: ClusterRole
Name: system:node
Subjects:
Kind Name Namespace
---- ---- ---------
Group system:nodes

Fix 2
See [this issue](https://github.com/kubernetes/kubernetes/issues/61511):
Ensure Node authorization comes first in the authorizer mode arguments:

--authorization-mode=Node,RBAC

If you reverse those, RBAC will attempt to authorize, fail and log, then the Node authorizer will succeed. The RBAC message is harmless in this case, but is annoying.

Don't grant the system:node role to the system:nodes group or you will undo the protection of the Node authorizer.

Enable IPVS for Kubernetes (run on all nodes)
yum -y install ipvsadm

cat<<EOF>/etc/sysconfig/modules/ipvs.modules
#!/bin/bash
ipvs_modules_dir="/usr/lib/modules/\`uname -r\`/kernel/net/netfilter/ipvs"
for i in \`ls \$ipvs_modules_dir | sed -r 's#(.*).ko.xz#\1#'\`; do
/sbin/modinfo -F filename \$i &> /dev/null
if [ \$? -eq 0 ]; then
/sbin/modprobe \$i
fi
done
EOF

chmod +x /etc/sysconfig/modules/ipvs.modules
bash /etc/sysconfig/modules/ipvs.modules
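
To confirm the modules are actually loaded (and, once kube-proxy is running in ipvs mode, that virtual servers are being programmed), check:

lsmod | grep -e ip_vs -e nf_conntrack
ipvsadm -Ln
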
Network deployment: choose either Flannel or Calico

Calico networking modes

  • IPIP
    Wraps one IP packet inside another IP packet, i.e. an IP-in-IP tunnel at the IP layer, which effectively acts as a bridge built on top of IP. An ordinary bridge works at the MAC layer and needs no IP at all, whereas ipip builds a tunnel between the routes at both ends and connects two otherwise unreachable networks point-to-point. The ipip source code lives in the kernel under net/ipv4/ipip.c.
  • BGP
    The Border Gateway Protocol (BGP) is a core decentralized routing protocol of the Internet. It provides reachability between autonomous systems (AS) by maintaining IP routing ("prefix") tables and is a path-vector protocol. BGP does not use the metrics of traditional interior gateway protocols (IGP); it makes routing decisions based on paths, network policies, and rule sets, so it is better described as a reachability protocol than a classic routing protocol. In data-center terms, BGP lets a facility merge several carrier uplinks (China Telecom, China Unicom, China Mobile, and so on) behind a single IP. The advantages of a BGP data center: a server needs only one IP address, and the best route is chosen by backbone routers based on hop counts and other metrics, consuming no resources on the server itself.

Core components of Calico:

  • Felix, the Calico agent, runs on every node that hosts workloads and is responsible for programming routes and ACLs to keep endpoints reachable;
  • etcd, a distributed key-value store, keeps the network metadata consistent and guarantees the accuracy of the Calico network state;
  • BGP Client (BIRD) distributes the routes that Felix writes into the kernel to the rest of the Calico network, ensuring connectivity between workloads;

BGP Route Reflector (BIRD) is used in large deployments: instead of a full node-to-node mesh, one or more BGP route reflectors perform centralized route distribution.
By scaling the Internet's IP-networking principles down to the data center, Calico runs an efficient vRouter in the Linux kernel on every compute node to forward traffic, and each vRouter advertises the routes of its local workloads to the rest of the Calico network over BGP
— peers can be fully meshed in small deployments or connected through designated BGP route reflectors in large ones.
The result is that all traffic between workloads is ultimately exchanged as plain IP packets.

When a container is created, Calico creates a veth pair for it: one end is moved into the container's network namespace as its NIC and assigned an IP and netmask, while the other end stays exposed on the host;
routing rules then publish the container IP on the host's routing table. At the same time, Calico allocates each host a subnet from which container IPs are assigned, so reasonably stable per-host routing rules can be
generated from the subnet CIDR.
Cross-host communication between containers then takes these simple steps:
1) Traffic leaves the container through the veth pair into the host's network namespace.
2) Based on the subnet CIDR of the destination IP and the host's routing rules, the next-hop host IP is determined.
3) When the traffic reaches the next-hop host, that host's routing rules deliver it directly to the host-side end of the destination container's veth pair, and finally into the container.

As this path shows, cross-host communication uses no NAT or UDP encapsulation at all, so the performance overhead is genuinely low. But precisely because Calico works purely at layer 3, the mechanism has some drawbacks, for example:
1) Calico currently supports only TCP, UDP, ICMP, and ICMPv6; for other layer-4 protocols (such as NetBIOS), consider weave, a native overlay, or another overlay network instead.
2) Communication is implemented at layer 3 with no encryption or encapsulation at layer 2, so it should only be used on a private, trusted network.
3) Traffic isolation is implemented with iptables, with the isolation rules generated from data in etcd, which carries some performance risk.

The calico/node Docker container runs on the Kubernetes masters and on every node; it contains the BGP agent used for Calico routing.
The calico-cni plugin is deployed alongside the kubelet on every node and adds newly created pods to the Calico network.
calico/kube-policy-controller runs as a pod inside Kubernetes.

Configure NetworkManager
NetworkManager manages the routing table for interfaces in the default network namespace, which can interfere with Calico's ability to handle routing correctly.
Create the configuration file /etc/NetworkManager/conf.d/calico.conf to prevent this interference:
[keyfile]
unmanaged-devices=interface-name:cali*;interface-name:tunl*



# Run on the nodes
mkdir /opt/cni/bin /etc/cni/net.d -p
wget https://github.com/containernetworking/plugins/releases/download/v0.8.6/cni-plugins-linux-amd64-v0.8.6.tgz
tar xvf cni-plugins-linux-amd64-v0.8.6.tgz -C /opt/cni/bin/

# Run the following on master01
mkdir -p ~/yaml/calico && cd ~/yaml/calico
# Note: the manifest below uses the self-managed etcd cluster as the datastore
curl https://docs.projectcalico.org/manifests/calico-etcd.yaml -O

The following settings in calico-etcd.yaml need to be modified

# Secret changes
apiVersion: v1
kind: Secret
type: Opaque
metadata:
  name: calico-etcd-secrets
  namespace: kube-system
data:
  etcd-key: (cat /opt/kubernetes/ssl/server-key.pem | base64 -w 0)  # paste the command output here
  etcd-cert: (cat /opt/kubernetes/ssl/server.pem | base64 -w 0)     # paste the command output here
  etcd-ca: (cat /opt/kubernetes/ssl/ca.pem | base64 -w 0)           # paste the command output here


# ConfigMap changes
# You must also populate the Secret below with these files.
etcd_ca: "/calico-secrets/etcd-ca"
etcd_cert: "/calico-secrets/etcd-cert"
etcd_key: "/calico-secrets/etcd-key"

# Interface auto-detection rules
- name: IP_AUTODETECTION_METHOD
  value: "interface=eth.*"
- name: IP6_AUTODETECTION_METHOD
  value: "interface=eth.*"
- name: KUBERNETES_SERVICE_HOST
  value: "lb.abc.com.cn"
- name: KUBERNETES_SERVICE_PORT
  value: "6443"
- name: KUBERNETES_SERVICE_PORT_HTTPS
  value: "6443"

# Check the Calico mode setting; the default is IPIP
- name: CALICO_IPV4POOL_IPIP
  value: "Always"

[root@master01 calico]# kubectl apply -f calico-etcd.yaml
secret/calico-etcd-secrets created
configmap/calico-config created
clusterrole.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrolebinding.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrole.rbac.authorization.k8s.io/calico-node created
clusterrolebinding.rbac.authorization.k8s.io/calico-node created
daemonset.apps/calico-node created
serviceaccount/calico-node created
deployment.apps/calico-kube-controllers created
serviceaccount/calico-kube-controllers created

# Note: if calico-kube-controllers reports the error below, add the KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT environment variables to its env section

E0820 02:39:51.732885 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.ServiceAccount: Get https://10.10.0.1:443/api/v1/serviceaccounts?limit=500&resourceVersion=0: dial tcp 10.10.0.1:443: i/o timeout

This also depends on which network plugin you choose: flannel in its default mode works as-is, but with Calico's IPIP mode you also need to add the KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT environment variables to the calico init containers, otherwise pod creation fails with errors about being unable to reach the apiserver.
Network topology matters as well: in this test environment the masters and nodes are separate machines, nginx load-balances the three masters, and nginx does not run on the masters themselves, which is why the calico init containers need those environment variables here.

[root@master01 calico]# kubectl get pods -n kube-system | grep calico
calico-kube-controllers-cdd76d5d-qh8lj 1/1 Running 0 51s
calico-node-8qntz 1/1 Running 0 51s
calico-node-c7d5k 1/1 Running 0 51s
calico-node-xrxb5 1/1 Running 0 51s

[root@master01 calico]# kubectl get node
NAME STATUS ROLES AGE VERSION
node01 Ready <none> 18h v1.18.6
node02 Ready <none> 6h7m v1.18.6
node03 Ready <none> 6h7m v1.18.6
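
Filling the Secret by hand is error-prone; the following sketch patches calico-etcd.yaml in place instead. The sed patterns assume the placeholder lines shipped in the upstream manifest at the time ("# etcd-key: null", "# etcd-cert: null", "# etcd-ca: null", and an etcd_endpoints entry of "http://<ETCD_IP>:<ETCD_PORT>" in the ConfigMap); check your copy of the file and adjust if it differs:

ETCD_ENDPOINTS="https://192.168.1.101:2379,https://192.168.1.102:2379,https://192.168.1.103:2379"
ETCD_KEY=$(base64 -w 0 < /opt/kubernetes/ssl/server-key.pem)
ETCD_CERT=$(base64 -w 0 < /opt/kubernetes/ssl/server.pem)
ETCD_CA=$(base64 -w 0 < /opt/kubernetes/ssl/ca.pem)
sed -i "s|\"http://<ETCD_IP>:<ETCD_PORT>\"|\"${ETCD_ENDPOINTS}\"|" calico-etcd.yaml
sed -i "s|# etcd-key: null|etcd-key: ${ETCD_KEY}|" calico-etcd.yaml
sed -i "s|# etcd-cert: null|etcd-cert: ${ETCD_CERT}|" calico-etcd.yaml
sed -i "s|# etcd-ca: null|etcd-ca: ${ETCD_CA}|" calico-etcd.yaml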

The calicoctl management tool

wget -O /usr/local/bin/calicoctl https://github.com/projectcalico/calicoctl/releases/download/v3.14.2/calicoctl
chmod +x /usr/local/bin/calicoctl

[root@node01 ~]# calicoctl node status
Calico process is running.

IPv4 BGP status
+----------------+-------------------+-------+----------+-------------+
| PEER ADDRESS | PEER TYPE | STATE | SINCE | INFO |
+----------------+-------------------+-------+----------+-------------+
| 192.168.1.104 | node-to-node mesh | up | 10:27:12 | Established |
| 192.168.1.105 | node-to-node mesh | up | 10:27:13 | Established |
+----------------+-------------------+-------+----------+-------------+

IPv6 BGP status
No IPv6 peers found.

If you use flannel instead, refer to the following

wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml

# Add the environment variables, and change Network to match the cluster-cidr configured in kube-controller-manager
- name: KUBERNETES_SERVICE_HOST
  value: lb.abc.com.cn
- name: KUBERNETES_SERVICE_PORT
  value: "6443"

net-conf.json: |
  {
    "Network": "10.20.0.0/16",
    "Backend": {
      "Type": "vxlan"
    }
  }
kubectl apply -f kube-flannel.yml
kubectl get pods -n kube-system

Deploy CoreDNS
yum install jq -y
cd ~/yaml
mkdir coredns
cd coredns
wget https://raw.githubusercontent.com/coredns/deployment/master/kubernetes/coredns.yaml.sed
wget https://raw.githubusercontent.com/coredns/deployment/master/kubernetes/deploy.sh
chmod +x deploy.sh
# By default deploy.sh derives CLUSTER_DNS_IP from the existing kube-dns service; since kube-dns is not deployed here, the cluster IP has to be specified manually
./deploy.sh -i 10.10.0.2 > coredns.yml
kubectl apply -f coredns.yml
kubectl get pods --namespace kube-system
kubectl get svc --namespace kube-system

# Test CoreDNS resolution
[root@master01 coredns]# cat busybox.yml
apiVersion: v1
kind: Pod
metadata:
  name: busybox
  namespace: default
spec:
  containers:
  - name: busybox
    image: busybox:1.28.4
    command:
    - sleep
    - "3600"
    imagePullPolicy: IfNotPresent
  restartPolicy: Always

[root@master01 coredns]# kubectl apply -f busybox.yml
[root@master01 coredns]# kubectl exec -i busybox -n default nslookup kubernetes
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl kubectl exec [POD] -- [COMMAND] instead.
Server: 10.10.0.2
Address 1: 10.10.0.2 kube-dns.kube-system.svc.cluster.local

Name: kubernetes
Address 1: 10.10.0.1 kubernetes.default.svc.cluster.local
Verify cluster status
# Label the master and node roles (the names must match the node names shown by kubectl get node; the master label only applies if a kubelet is registered for it)
kubectl label node master01 node-role.kubernetes.io/master='master'
kubectl label node node01 node-role.kubernetes.io/node='node'
kubectl label node node02 node-role.kubernetes.io/node='node'
kubectl label node node03 node-role.kubernetes.io/node='node'

kubectl get node,cs
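
As a final smoke test (the deployment and service names here are arbitrary examples, not part of the original setup), deploy something and reach it through a NodePort:

kubectl create deployment web --image=nginx:1.19
kubectl expose deployment web --port=80 --type=NodePort
kubectl get pods -o wide
kubectl get svc web
# then curl any node on the reported NodePort, e.g.
# curl http://192.168.1.104:<nodePort>
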
Problems encountered with metrics-server
E0904 14:56:32.791786     852 available_controller.go:420] v1beta1.metrics.k8s.io failed with: failing or missing response from https://10.10.22.87:443/apis/metrics.k8s.io/v1beta1: bad status from https://10.10.22.87:443/apis/metrics.k8s.io/v1beta1: 403

There are two ways to fix this:
1. Grant permissions to system:anonymous:
kubectl create clusterrolebinding system:anonymous --clusterrole=cluster-admin --user=system:anonymous

2. Create and bind a system:metrics-server role; see the [official documentation](https://kubernetes.io/docs/tasks/extend-kubernetes/configure-aggregation-layer/)

cat <<EOF > /opt/kubernetes/ssl/front-proxy-ca-csr.json
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  }
}
EOF

cfssl gencert -initca front-proxy-ca-csr.json | cfssljson -bare front-proxy-ca

cat front-proxy-client-csr.json
{
  "CN": "front-proxy-client",
  "key": {
    "algo": "rsa",
    "size": 2048
  }
}

cfssl gencert -ca=front-proxy-ca.pem -ca-key=front-proxy-ca-key.pem -config=ca-config.json -profile=kubernetes front-proxy-client-csr.json | cfssljson -bare front-proxy-client

Add the following flags to kube-apiserver and restart the service
--proxy-client-cert-file=/opt/kubernetes/ssl/front-proxy-client.pem \
--proxy-client-key-file=/opt/kubernetes/ssl/front-proxy-client-key.pem \
--requestheader-allowed-names=front-proxy-client \
--requestheader-client-ca-file=/opt/kubernetes/ssl/front-proxy-ca.pem \
--requestheader-extra-headers-prefix=X-Remote-Extra- \
--requestheader-group-headers=X-Remote-Group \
--requestheader-username-headers=X-Remote-User \
--runtime-config=api/all=true"

After the restart, check with the following command that the corresponding configmap has been generated
kubectl get configmap extension-apiserver-authentication -nkube-system -o yaml
