Jusene's Blog

Kubernetes v1.18.2 Binary High-Availability Deployment Manual

2020/08/08

Environment

Server information

| Hostname | IP | Role |
| --- | --- | --- |
| k8s-master1 | 10.211.55.36 | master, etcd1, node |
| k8s-master2 | 10.211.55.37 | master, etcd2, node |
| k8s-master3 | 10.211.55.38 | master, etcd3, node |
| ha | 10.211.55.39 | haproxy, keepalived |

  • 10.10.0.1 is the cluster IP of the `kubernetes` service
  • flannel uses the VXLAN backend (see the kube-flannel.yaml below)
  • the cluster runs kube-proxy in ipvs mode
  • docker-ce version 19.03.6
  • kubernetes version 1.18.2
  • etcd version v3.4.7
  • flannel version v0.12.0
  • coredns version 1.7.0
  • metrics-server version v0.3.6

Network layout

| Name | CIDR / IP | Notes |
| --- | --- | --- |
| cluster network | 10.10.0.0/16 | 65,534 usable IPs |
| pod network | 10.20.0.0/16 | 65,534 usable IPs |
| coredns | 10.10.0.2 | cluster DNS service IP |
| k8s svc | 10.10.0.1 | cluster `kubernetes` service IP |
| vip | 10.211.55.100 | HA VIP |

Initialize the environment

  • init.sh

#!/bin/bash

# Set the hostname
hostname=$1
if [ -n "$hostname" ];then
    hostnamectl set-hostname $hostname
else
    echo "Usage: init.sh <hostname>"
    exit 1
fi
# Disable the firewall
systemctl stop firewalld
systemctl disable firewalld

# Disable selinux
setenforce 0
sed -i "s/^SELINUX=.*/SELINUX=disabled/g" /etc/selinux/config

# Disable swap
swapoff -a
sed -i 's/.*swap.*/#&/' /etc/fstab

## Install dependencies
yum install -y bind-utils bzip2 git nfs-utils curl yum-utils device-mapper-persistent-data lvm2 net-tools conntrack-tools wget vim ntpdate libseccomp libtool-ltdl telnet
#echo -e "\033[32;32m Upgrade the CentOS 7 kernel to work around kube-proxy ipvs compatibility issues\033[0m \n"
#rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org && \
#yum install -y https://www.elrepo.org/elrepo-release-7.el7.elrepo.noarch.rpm && \
#yum install -y kernel-lt --enablerepo=elrepo-kernel && \
#grub2-set-default "CentOS Linux (4.4.234-1.el7.elrepo.x86_64) 7 (Core)" &&\
#grub2-editenv list


echo -e "\033[32;32m Upgrade iptables to work around kube-proxy ipvs compatibility issues\033[0m \n"
yum install -y gcc make libnftnl-devel libmnl-devel autoconf automake libtool bison flex libnetfilter_conntrack-devel libnetfilter_queue-devel libpcap-devel
wget https://www.netfilter.org/projects/iptables/files/iptables-1.6.2.tar.bz2
tar -xvf iptables-1.6.2.tar.bz2
cd iptables-1.6.2
./autogen.sh
./configure
make -j4
make install


cat <<EOF >  /etc/sysctl.d/k8s.conf
# Fix long-connection timeouts in ipvs mode (default is 2 hours)
net.ipv4.tcp_keepalive_time = 600
# Keepalive probe interval (default is 75 seconds)
net.ipv4.tcp_keepalive_intvl = 30
# Number of keepalive probes sent before a connection is considered dead (default is 9)
net.ipv4.tcp_keepalive_probes = 10
# Disable ipv6
net.ipv6.conf.all.disable_ipv6 = 1
net.ipv6.conf.default.disable_ipv6 = 1
net.ipv6.conf.lo.disable_ipv6 = 1
# How long before stale neighbor (ARP) entries are rechecked
net.ipv4.neigh.default.gc_stale_time = 120
# Use arp_announce / arp_ignore to resolve ARP mapping issues
net.ipv4.conf.default.arp_announce = 2
net.ipv4.conf.lo.arp_announce = 2
net.ipv4.conf.all.arp_announce = 2
# Enable IP forwarding
net.ipv4.ip_forward = 1
# Network connection tuning
net.ipv4.tcp_max_tw_buckets = 5000
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 1024
net.ipv4.tcp_synack_retries = 2
# Make iptables see bridged traffic
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-arptables = 1
net.netfilter.nf_conntrack_max = 2310720
fs.inotify.max_user_watches=89100
fs.may_detach_mounts = 1
fs.file-max = 52706963
fs.nr_open = 52706963
vm.swappiness = 0
vm.overcommit_memory=1
vm.panic_on_oom=0
EOF

sysctl -p /etc/sysctl.d/k8s.conf

## Load the ipvs kernel modules
module=(
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack
br_netfilter
)
for kernel_module in ${module[@]};do
    /sbin/modinfo -F filename $kernel_module |& grep -qv ERROR && echo $kernel_module >> /etc/modules-load.d/ipvs.conf || :
done
systemctl enable --now systemd-modules-load.service

## Install docker

curl -fsSL "https://get.docker.com/" | bash -s -- --mirror Aliyun
sed -i '/ExecStart=/i ExecStartPost=\/sbin\/iptables -P FORWARD ACCEPT' /usr/lib/systemd/system/docker.service
yum install -y bash-completion && cp /usr/share/bash-completion/completions/docker /etc/bash_completion.d/
systemctl enable docker --now
echo '{"registry-mirrors": ["https://4xr1qpsp.mirror.aliyuncs.com"], "log-opts": {"max-size":"500m", "max-file":"3"}}' > /etc/docker/daemon.json

reboot
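
Run the script once on every machine, passing that node's hostname (this matches the script's own usage message); for example, on the first master:

bash init.sh k8s-master1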

Kubernetes deployment

Deploy HA

yum install -y haproxy keepalived
  • haproxy

global
  maxconn  2000
  ulimit-n  16384
  log  127.0.0.1 local0 err
  stats timeout 30s

defaults
  log global
  mode  http
  option  httplog
  timeout connect 5000
  timeout client  50000
  timeout server  50000
  timeout http-request 15s
  timeout http-keep-alive 15s

frontend monitor-in
  bind *:33305
  mode http
  option httplog
  monitor-uri /monitor

listen stats
  bind    *:8006
  mode    http
  stats   enable
  stats   hide-version
  stats   uri       /stats
  stats   refresh   30s
  stats   realm     Haproxy\ Statistics
  stats   auth      admin:admin

frontend k8s-api
  bind 0.0.0.0:6443
  bind 127.0.0.1:6443
  mode tcp
  option tcplog
  tcp-request inspect-delay 5s
  default_backend k8s-api

backend k8s-api
  mode tcp
  option tcplog
  option tcp-check
  balance roundrobin
  default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 2500 maxqueue 500 weight 100
  server k8s-api-1      10.211.55.36:6443  check
  server k8s-api-2      10.211.55.37:6443  check
  server k8s-api-3      10.211.55.38:6443  check
  • keepalived

global_defs {
    enable_script_security
}

vrrp_script haproxy-check {
    user root
    script "/bin/bash /etc/keepalived/check_haproxy.sh"
    interval 3
    weight -2
    fall 10
    rise 2
}

vrrp_instance haproxy-vip {
    state BACKUP
    priority 101
    interface eth0
    virtual_router_id 47
    advert_int 3

    unicast_peer {
        10.211.55.39
    }

    virtual_ipaddress {
        10.211.55.100/32 dev eth0 label eth0:0
    }

    track_script {
        haproxy-check
    }
}
  • check_haproxy.sh

#!/bin/bash
VIRTUAL_IP=10.211.55.100

errorExit() {
    echo "*** $*" 1>&2
    exit 1
}

if ip addr | grep -q $VIRTUAL_IP ; then
    curl -s --max-time 2 --insecure https://${VIRTUAL_IP}:6443/ -o /dev/null || errorExit "Error GET https://${VIRTUAL_IP}:6443/"
fi
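
With the configs in place, start both services and confirm the VIP answers. A minimal sketch of that check, reusing the monitor-uri and VIP defined above:

systemctl enable haproxy keepalived --now
# the VIP should appear as eth0:0 and the monitor URI should return 200
ip addr show eth0
curl -s -o /dev/null -w '%{http_code}\n' http://10.211.55.100:33305/monitor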

Self-signed TLS certificates

  • Download the certificate tooling

wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 -O /usr/local/bin/cfssl
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64 -O /usr/local/bin/cfssljson
wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64 -O /usr/local/bin/cfssl-certinfo
chmod +x /usr/local/bin/cfssl*

mkdir tls
cd tls
cat certificate.sh
  • certificate.sh
cat > ca-config.json << EOF
{
    "signing": {
        "default": {
            "expiry": "87600h"
        },
        "profiles": {
            "kubernetes": {
                "expiry": "87600h",
                "usages": [
                    "signing",
                    "key encipherment",
                    "server auth",
                    "client auth"
                ]
            }
        }
    }
}
EOF

cat > ca-csr.json << EOF
{
    "CN": "kubernetes",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "Zhejiang",
            "ST": "Hangzhou",
            "O": "k8s",
            "OU": "System"
        }
    ]
}
EOF

cfssl gencert -initca ca-csr.json | cfssljson -bare ca -

#------------

cat > server-csr.json << EOF
{
    "CN": "kubernetes",
    "hosts": [
        "127.0.0.1",
        "10.211.55.36",
        "10.211.55.37",
        "10.211.55.38",
        "10.211.55.100",
        "10.10.0.1",
        "kubernetes",
        "kubernetes.default",
        "kubernetes.default.svc",
        "kubernetes.default.svc.cluster",
        "kubernetes.default.svc.cluster.local"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "Zhejiang",
            "ST": "Hangzhou",
            "O": "k8s",
            "OU": "System"
        }
    ]
}
EOF

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes server-csr.json | cfssljson -bare server

#-------------

cat > admin-csr.json << EOF
{
    "CN": "admin",
    "hosts": [],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
        "C": "CN",
        "L": "Zhejiang",
        "ST": "Hangzhou",
        "O": "system:masters",
        "OU": "System"
        }
    ]
}
EOF

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin

#----------------

cat > kube-proxy-csr.json << EOF
{
    "CN": "system:kube-proxy",
    "hosts": [],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "Zhejiang",
            "ST": "Hangzhou",
            "O": "k8s",
            "OU": "System"
        }
    ]
}
EOF

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy


#----------
# "CN": "system:metrics-server" must be exactly this; it is referenced later when granting RBAC
cat > metrics-server-csr.json << EOF
{
    "CN": "system:metrics-server",
    "hosts": [],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "Zhejiang",
            "L": "Hangzhou",
            "O": "k8s",
            "OU": "System"
        }
    ]
}
EOF

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes metrics-server-csr.json | cfssljson -bare metrics-server
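
To sanity-check a generated certificate (CN, SANs, expiry), cfssl-certinfo can decode it:

cfssl-certinfo -cert server.pem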

Deploy the etcd cluster

https://github.com/etcd-io/etcd/releases/download/v3.4.7/etcd-v3.4.7-linux-amd64.tar.gz

# repeat on the other master nodes: copy etcd and etcdctl to each of them
mkdir -p /etc/kubernetes/ssl /etc/etcd
cp ca*pem server*pem /etc/kubernetes/ssl
cp etcd etcdctl /usr/local/bin
  • etcd.sh

#!/bin/bash

ETCD_NAME=${1:-"etcd01"}
ETCD_IP=${2:-"127.0.0.1"}
ETCD_CLUSTER=${3:-"etcd01=https://127.0.0.1:2380"}

mkdir -p /ddhome/etcd/{data,wal}

cat << EOF > /etc/etcd/etcd.yml
name: ${ETCD_NAME}
data-dir: /ddhome/etcd/data
wal-dir: /ddhome/etcd/wal
snapshot-count: 5000
heartbeat-interval: 100
election-timeout: 1000
quota-backend-bytes: 0
max-snapshots: 3
max-wals: 5
listen-peer-urls: https://${ETCD_IP}:2380
listen-client-urls: https://${ETCD_IP}:2379,http://127.0.0.1:2379
advertise-client-urls: https://${ETCD_IP}:2379
initial-advertise-peer-urls: https://${ETCD_IP}:2380
initial-cluster: ${ETCD_CLUSTER}
initial-cluster-token: etcd-cluster
initial-cluster-state: new

client-transport-security:
  cert-file: /etc/kubernetes/ssl/server.pem
  key-file: /etc/kubernetes/ssl/server-key.pem
  client-cert-auth: false
  trusted-ca-file: /etc/kubernetes/ssl/ca.pem
  auto-tls: false
peer-transport-security:
  cert-file: /etc/kubernetes/ssl/server.pem
  key-file: /etc/kubernetes/ssl/server-key.pem
  client-cert-auth: false
  trusted-ca-file: /etc/kubernetes/ssl/ca.pem
  auto-tls: false
debug: false
logger: zap
log-outputs: [stderr]
force-new-cluster: false
EOF

cat << EOF > /usr/lib/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=notify
LimitNOFILE=65535
Restart=on-failure
RestartSec=5s
TimeoutStartSec=0
ExecStart=/usr/local/bin/etcd --config-file=/etc/etcd/etcd.yml

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable etcd --now

k8s-master1:

./etcd.sh etcd01 10.211.55.36 etcd01=https://10.211.55.36:2380,etcd02=https://10.211.55.37:2380,etcd03=https://10.211.55.38:2380

k8s-master2:

./etcd.sh etcd02 10.211.55.37 etcd01=https://10.211.55.36:2380,etcd02=https://10.211.55.37:2380,etcd03=https://10.211.55.38:2380

k8s-master3:

./etcd.sh etcd03 10.211.55.38 etcd01=https://10.211.55.36:2380,etcd02=https://10.211.55.37:2380,etcd03=https://10.211.55.38:2380

Check the cluster status:

ETCDCTL_API=3 etcdctl --write-out=table --cacert=/etc/kubernetes/ssl/ca.pem --cert=/etc/kubernetes/ssl/server.pem --key=/etc/kubernetes/ssl/server-key.pem --endpoints=https://10.211.55.36:2379,https://10.211.55.37:2379,https://10.211.55.38:2379 endpoint status
+---------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
|         ENDPOINT          |        ID        | VERSION | DB SIZE | IS LEADER | IS LEARNER | RAFT TERM | RAFT INDEX | RAFT APPLIED INDEX | ERRORS |
+---------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
| https://10.211.55.36:2379 | 54927c604e260c07 |   3.4.7 |   20 kB |      true |      false |        62 |          9 |                  9 |        |
| https://10.211.55.37:2379 | 86a5630dff79475c |   3.4.7 |   20 kB |     false |      false |        62 |          9 |                  9 |        |
| https://10.211.55.38:2379 | 3c25990de93171eb |   3.4.7 |   20 kB |     false |      false |        62 |          9 |                  9 |        |
+---------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
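
Once the cluster is healthy, it is worth taking periodic snapshots; a minimal backup sketch using the same certificates (the target path is just an example):

ETCDCTL_API=3 etcdctl --cacert=/etc/kubernetes/ssl/ca.pem --cert=/etc/kubernetes/ssl/server.pem --key=/etc/kubernetes/ssl/server-key.pem --endpoints=https://10.211.55.36:2379 snapshot save /ddhome/etcd/snapshot-$(date +%F).db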

Deploy Kubernetes

# prepare the kubernetes binary package
docker run -itd --name=tmp --rm jusene/kubernetes-1.18.2-bin sleep 300
docker cp tmp:/kubernetes-server-linux-amd64.tar.gz .
tar xf kubernetes-server-linux-amd64.tar.gz

The master nodes need:

cp kubernetes/server/bin/{kubectl,kube-scheduler,kube-apiserver,kube-controller-manager} /usr/local/bin

# copy to the other master nodes

The node machines need:

cp kubernetes/server/bin/{kubelet,kube-proxy} /usr/local/bin

# copy to the other node machines

Configure and run the master components

Preparation

mkdir -p /var/log/kubernetes
touch /var/log/kubernetes/k8s-audit.log

  • token.csv

export BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d " ")
cat > token.csv << EOF
${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF
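
The apiserver reads this file via --token-auth-file, so place it at the path that flag points to (see apiserver.sh below) on every master:

cp token.csv /etc/kubernetes/token.csv
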
  • apiserver.sh

#!/bin/bash

MASTER_ADDRESS=${1:-"10.211.55.100"}
ETCD_SERVERS=${2:-"http://127.0.0.1:2379"}

cat << EOF > /etc/kubernetes/kube-apiserver
KUBE_APISERVER_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/var/log/kubernetes \\
--etcd-servers=${ETCD_SERVERS} \\
--bind-address=0.0.0.0 \\
--secure-port=6443 \\
--advertise-address=${MASTER_ADDRESS} \\
--allow-privileged=true \\
--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname \\
--service-cluster-ip-range=10.10.0.0/16 \\
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeClaimResize,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,Priority,PodPreset \\
--authorization-mode=RBAC,Node \\
--kubelet-https=true \\
--enable-bootstrap-token-auth=true \\
--token-auth-file=/etc/kubernetes/token.csv \\
--service-node-port-range=30000-50000 \\
--kubelet-client-certificate=/etc/kubernetes/ssl/server.pem \\
--kubelet-client-key=/etc/kubernetes/ssl/server-key.pem \\
--tls-cert-file=/etc/kubernetes/ssl/server.pem \\
--tls-private-key-file=/etc/kubernetes/ssl/server-key.pem \\
--client-ca-file=/etc/kubernetes/ssl/ca.pem \\
--service-account-key-file=/etc/kubernetes/ssl/ca-key.pem \\
--etcd-cafile=/etc/kubernetes/ssl/ca.pem \\
--etcd-certfile=/etc/kubernetes/ssl/server.pem \\
--etcd-keyfile=/etc/kubernetes/ssl/server-key.pem \\
--requestheader-client-ca-file=/etc/kubernetes/ssl/ca.pem \\
--requestheader-extra-headers-prefix=X-Remote-Extra- \\
--requestheader-group-headers=X-Remote-Group \\
--requestheader-username-headers=X-Remote-User \\
--proxy-client-cert-file=/etc/kubernetes/ssl/metrics-server.pem \\
--proxy-client-key-file=/etc/kubernetes/ssl/metrics-server-key.pem \\
--runtime-config=api/all=true \\
--audit-log-maxage=30 \\
--audit-log-maxbackup=3 \\
--audit-log-maxsize=100 \\
--audit-log-truncate-enabled=true \\
--audit-log-path=/var/log/kubernetes/k8s-audit.log"
EOF

cat << EOF > /usr/lib/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/etc/kubernetes/kube-apiserver
ExecStart=/usr/local/bin/kube-apiserver \$KUBE_APISERVER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable kube-apiserver --now
# run on k8s-master1, k8s-master2 and k8s-master3 respectively, passing each node's own IP
./apiserver.sh 10.211.55.36 https://10.211.55.36:2379,https://10.211.55.37:2379,https://10.211.55.38:2379
./apiserver.sh 10.211.55.37 https://10.211.55.36:2379,https://10.211.55.37:2379,https://10.211.55.38:2379
./apiserver.sh 10.211.55.38 https://10.211.55.36:2379,https://10.211.55.37:2379,https://10.211.55.38:2379
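
With the default RBAC bootstrap policy, /healthz is readable without authentication, so each apiserver can be spot-checked directly; a quick sketch:

for ip in 10.211.55.36 10.211.55.37 10.211.55.38; do
  echo -n "$ip: "; curl -sk https://$ip:6443/healthz; echo
done
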
  • controller-manager.sh

#!/bin/bash

MASTER_ADDRESS=${1:-"127.0.0.1"}

cat << EOF > /etc/kubernetes/kube-controller-manager
KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=true \\
--v=2 \\
--master=${MASTER_ADDRESS}:8080 \\
--leader-elect=true \\
--bind-address=0.0.0.0 \\
--service-cluster-ip-range=10.10.0.0/16 \\
--cluster-name=kubernetes \\
--cluster-signing-cert-file=/etc/kubernetes/ssl/ca.pem \\
--cluster-signing-key-file=/etc/kubernetes/ssl/ca-key.pem \\
--service-account-private-key-file=/etc/kubernetes/ssl/ca-key.pem \\
--experimental-cluster-signing-duration=87600h0m0s \\
--feature-gates=RotateKubeletServerCertificate=true,RotateKubeletClientCertificate=true \\
--allocate-node-cidrs=true \\
--cluster-cidr=10.20.0.0/16 \\
--root-ca-file=/etc/kubernetes/ssl/ca.pem"
EOF

cat << EOF > /usr/lib/systemd/system/kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/etc/kubernetes/kube-controller-manager
ExecStart=/usr/local/bin/kube-controller-manager \$KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable kube-controller-manager --now

./controller-manager.sh 127.0.0.1
  • scheduler.sh

#!/bin/bash

MASTER_ADDRESS=${1:-"127.0.0.1"}
cat << EOF > /etc/kubernetes/kube-scheduler
KUBE_SCHEDULER_OPTS="--logtostderr=true \\
--v=2 \\
--master=${MASTER_ADDRESS}:8080 \\
--address=0.0.0.0 \\
--leader-elect"
EOF

cat << EOF > /usr/lib/systemd/system/kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/etc/kubernetes/kube-scheduler
ExecStart=/usr/local/bin/kube-scheduler \$KUBE_SCHEDULER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable kube-scheduler --now

./scheduler.sh 127.0.0.1

Generate the admin kubeconfig

kubectl config set-cluster kubernetes --certificate-authority=/etc/kubernetes/ssl/ca.pem --embed-certs=true --server=https://10.211.55.100:6443

kubectl config set-credentials admin --client-certificate=admin.pem --client-key=admin-key.pem --embed-certs=true

kubectl config set-context kubernetes --cluster=kubernetes --user=admin

kubectl config use-context kubernetes
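
A quick check that the kubeconfig reaches the apiserver through the VIP:

kubectl cluster-info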

Check the master components

kubectl get cs
NAME                 STATUS    MESSAGE             ERROR
controller-manager   Healthy   ok
scheduler            Healthy   ok
etcd-1               Healthy   {"health":"true"}
etcd-2               Healthy   {"health":"true"}
etcd-0               Healthy   {"health":"true"}

Copy the admin kubeconfig to the other master nodes

scp ~/.kube/config k8s-master2:~/.kube/

Configure kubelet certificate auto-renewal and create the node bootstrap user

  • Create the node bootstrap user

kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap

  • Create the ClusterRole for CSR requests (tls-instructs-csr.yaml)

kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: system:certificates.k8s.io:certificatesigningrequests:selfnodeserver
rules:
- apiGroups: ["certificates.k8s.io"]
  resources: ["certificatesigningrequests/selfnodeserver"]
  verbs: ["create"]

kubectl apply -f tls-instructs-csr.yaml

  • Auto-approve the first CSR submitted by the kubelet-bootstrap user during TLS bootstrapping

kubectl create clusterrolebinding node-client-auto-approve-csr --clusterrole=system:certificates.k8s.io:certificatesigningrequests:nodeclient --user=kubelet-bootstrap

  • Auto-approve CSRs from the system:nodes group renewing the kubelet client certificate used to talk to the apiserver

kubectl create clusterrolebinding node-client-auto-renew-crt --clusterrole=system:certificates.k8s.io:certificatesigningrequests:selfnodeclient --group=system:nodes

  • Auto-approve CSRs from the system:nodes group renewing the kubelet serving certificate for its port 10250 API

kubectl create clusterrolebinding node-server-auto-renew-crt --clusterrole=system:certificates.k8s.io:certificatesigningrequests:selfnodeserver --group=system:nodes

Configure kubeconfigs

  • bootstrap.kubeconfig

export KUBE_APISERVER="https://10.211.55.100:6443"
# must match the token in token.csv
export BOOTSTRAP_TOKEN="5a840c1dc3520c7255708a6a1a4010b2"

# set cluster parameters
kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/ssl/ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=/etc/kubernetes/bootstrap.kubeconfig

# set client credentials
kubectl config set-credentials kubelet-bootstrap \
--token=${BOOTSTRAP_TOKEN} \
--kubeconfig=/etc/kubernetes/bootstrap.kubeconfig

# set context parameters
kubectl config set-context default \
--cluster=kubernetes \
--user=kubelet-bootstrap \
--kubeconfig=/etc/kubernetes/bootstrap.kubeconfig

# set the default context
kubectl config use-context default --kubeconfig=/etc/kubernetes/bootstrap.kubeconfig

  • kube-proxy.kubeconfig

kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/ssl/ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig

kubectl config set-credentials kube-proxy \
--client-certificate=/root/tls/kube-proxy.pem \
--client-key=/root/tls/kube-proxy-key.pem \
--embed-certs=true \
--kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig

kubectl config set-context default \
--cluster=kubernetes \
--user=kube-proxy \
--kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig

kubectl config use-context default --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
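
Both kubeconfigs must exist on every node before kubelet and kube-proxy start; a distribution sketch, assuming the master hostnames from the table above:

for node in k8s-master2 k8s-master3; do
  scp /etc/kubernetes/bootstrap.kubeconfig /etc/kubernetes/kube-proxy.kubeconfig $node:/etc/kubernetes/
done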

Fix access to kubelet logs

  • apiserver-to-kubelet-rbac.yaml

kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kubelet-api-admin
subjects:
- kind: User
  name: kubernetes
  apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: ClusterRole
  name: system:kubelet-api-admin
  apiGroup: rbac.authorization.k8s.io
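
This binding grants the apiserver's kubelet-client identity (CN "kubernetes" in server.pem) the kubelet API admin role, which is what kubectl logs/exec needs; apply it with:

kubectl apply -f apiserver-to-kubelet-rbac.yaml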

Deploy the node components

  • kubelet.sh

#!/bin/bash

DNS_SERVER_IP=${1:-"10.10.0.2"}
HOSTNAME=${2:-"`hostname`"}
CLUSTERDOMAIN=${3:-"cluster.local"}

cat << EOF > /etc/kubernetes/kubelet
KUBELET_OPTS="--logtostderr=true \\
--v=2 \\
--hostname-override=${HOSTNAME} \\
--kubeconfig=/etc/kubernetes/kubelet.kubeconfig \\
--bootstrap-kubeconfig=/etc/kubernetes/bootstrap.kubeconfig \\
--config=/etc/kubernetes/kubelet-config.yml \\
--cert-dir=/etc/kubernetes/ssl \\
--network-plugin=cni \\
--cni-conf-dir=/etc/cni/net.d \\
--cni-bin-dir=/opt/cni/bin \\
--pod-infra-container-image=jusene/pause-amd64:3.2"
EOF

cat << EOF > /etc/kubernetes/kubelet-config.yml
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: 0.0.0.0
port: 10250
readOnlyPort: 10255
cgroupDriver: cgroupfs
clusterDNS:
  - ${DNS_SERVER_IP}
clusterDomain: ${CLUSTERDOMAIN}
failSwapOn: false

# authentication
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 2m0s
    enabled: true
  x509:
    clientCAFile: /etc/kubernetes/ssl/ca.pem

# authorization
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 5m0s
    cacheUnauthorizedTTL: 30s

# node resource reservation / eviction
evictionHard:
  imagefs.available: 15%
  memory.available: 1G
  nodefs.available: 10%
  nodefs.inodesFree: 5%
evictionPressureTransitionPeriod: 5m0s

# image garbage-collection policy
imageGCHighThresholdPercent: 85
imageGCLowThresholdPercent: 80
imageMinimumGCAge: 2m0s

# certificate rotation
rotateCertificates: true
featureGates:
  RotateKubeletServerCertificate: true
  RotateKubeletClientCertificate: true

maxOpenFiles: 1000000
maxPods: 110
EOF

cat << EOF > /usr/lib/systemd/system/kubelet.service
[Unit]
Description=Kubernetes Kubelet
After=docker.service
Requires=docker.service

[Service]
EnvironmentFile=-/etc/kubernetes/kubelet
ExecStart=/usr/local/bin/kubelet \$KUBELET_OPTS
Restart=on-failure
KillMode=process

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable kubelet --now

./kubelet.sh 10.10.0.2 k8s-master1 cluster.local
  • proxy.sh

#!/bin/bash

HOSTNAME=${1:-"`hostname`"}

cat << EOF > /etc/kubernetes/kube-proxy
KUBE_PROXY_OPTS="--logtostderr=true \\
--v=2 \\
--config=/etc/kubernetes/kube-proxy-config.yml"
EOF

cat << EOF > /etc/kubernetes/kube-proxy-config.yml
kind: KubeProxyConfiguration
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 0.0.0.0
metricsBindAddress: 0.0.0.0:10249
clientConnection:
  kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig
hostnameOverride: ${HOSTNAME}
clusterCIDR: 10.20.0.0/16
mode: ipvs
ipvs:
  scheduler: "rr"
iptables:
  masqueradeAll: false
  masqueradeBit: 14
  minSyncPeriod: 0s
  syncPeriod: 30s
EOF

cat << EOF > /usr/lib/systemd/system/kube-proxy.service
[Unit]
Description=Kubernetes Proxy
After=network.target

[Service]
EnvironmentFile=-/etc/kubernetes/kube-proxy
ExecStart=/usr/local/bin/kube-proxy \$KUBE_PROXY_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable kube-proxy --now

./proxy.sh k8s-master1
  • Check node status

kubectl get node
NAME          STATUS     ROLES    AGE   VERSION
k8s-master1   NotReady   <none>   46m   v1.18.2
k8s-master2   NotReady   <none>   51s   v1.18.2
k8s-master3   NotReady   <none>   40s   v1.18.2
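
If a node never registers, first confirm its bootstrap CSR was auto-approved by the bindings created earlier:

kubectl get csr
# each node's request should show CONDITION "Approved,Issued"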

Install the flannel network

  • kube-flannel.yaml

---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: psp.flannel.unprivileged
  annotations:
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
    seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
    apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
    apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
spec:
  privileged: false
  volumes:
    - configMap
    - secret
    - emptyDir
    - hostPath
  allowedHostPaths:
    - pathPrefix: "/etc/cni/net.d"
    - pathPrefix: "/etc/kube-flannel"
    - pathPrefix: "/run/flannel"
  readOnlyRootFilesystem: false
  # Users and groups
  runAsUser:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
  # Privilege Escalation
  allowPrivilegeEscalation: false
  defaultAllowPrivilegeEscalation: false
  # Capabilities
  allowedCapabilities: ['NET_ADMIN', 'NET_RAW']
  defaultAddCapabilities: []
  requiredDropCapabilities: []
  # Host namespaces
  hostPID: false
  hostIPC: false
  hostNetwork: true
  hostPorts:
  - min: 0
    max: 65535
  # SELinux
  seLinux:
    # SELinux is unused in CaaSP
    rule: 'RunAsAny'
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: flannel
rules:
  - apiGroups: ['extensions']
    resources: ['podsecuritypolicies']
    verbs: ['use']
    resourceNames: ['psp.flannel.unprivileged']
  - apiGroups:
      - ""
    resources:
      - pods
    verbs:
      - get
  - apiGroups:
      - ""
    resources:
      - nodes
    verbs:
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - nodes/status
    verbs:
      - patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: flannel
  namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flannel
  namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-system
  labels:
    tier: node
    app: flannel
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }
  net-conf.json: |
    {
      "Network": "10.20.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds-amd64
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: kubernetes.io/os
                    operator: In
                    values:
                      - linux
                  - key: kubernetes.io/arch
                    operator: In
                    values:
                      - amd64
      hostNetwork: true
      priorityClassName: system-node-critical
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: quay.io/coreos/flannel:v0.12.0-amd64
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.12.0-amd64
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN", "NET_RAW"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
        - name: run
          hostPath:
            path: /run/flannel
        - name: cni
          hostPath:
            path: /etc/cni/net.d
        - name: flannel-cfg
          configMap:
            name: kube-flannel-cfg
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds-arm64
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: kubernetes.io/os
                    operator: In
                    values:
                      - linux
                  - key: kubernetes.io/arch
                    operator: In
                    values:
                      - arm64
      hostNetwork: true
      priorityClassName: system-node-critical
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: quay.io/coreos/flannel:v0.12.0-arm64
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.12.0-arm64
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN", "NET_RAW"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
        - name: run
          hostPath:
            path: /run/flannel
        - name: cni
          hostPath:
            path: /etc/cni/net.d
        - name: flannel-cfg
          configMap:
            name: kube-flannel-cfg
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds-arm
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: kubernetes.io/os
                    operator: In
                    values:
                      - linux
                  - key: kubernetes.io/arch
                    operator: In
                    values:
                      - arm
      hostNetwork: true
      priorityClassName: system-node-critical
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: quay.io/coreos/flannel:v0.12.0-arm
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.12.0-arm
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN", "NET_RAW"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
        - name: run
          hostPath:
            path: /run/flannel
        - name: cni
          hostPath:
            path: /etc/cni/net.d
        - name: flannel-cfg
          configMap:
            name: kube-flannel-cfg
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds-ppc64le
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: kubernetes.io/os
                    operator: In
                    values:
                      - linux
                  - key: kubernetes.io/arch
                    operator: In
                    values:
                      - ppc64le
      hostNetwork: true
      priorityClassName: system-node-critical
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: quay.io/coreos/flannel:v0.12.0-ppc64le
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.12.0-ppc64le
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN", "NET_RAW"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
        - name: run
          hostPath:
            path: /run/flannel
        - name: cni
          hostPath:
            path: /etc/cni/net.d
        - name: flannel-cfg
          configMap:
            name: kube-flannel-cfg
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds-s390x
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: kubernetes.io/os
                    operator: In
                    values:
                      - linux
                  - key: kubernetes.io/arch
                    operator: In
                    values:
                      - s390x
      hostNetwork: true
      priorityClassName: system-node-critical
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: quay.io/coreos/flannel:v0.12.0-s390x
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.12.0-s390x
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN", "NET_RAW"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
        - name: run
          hostPath:
            path: /run/flannel
        - name: cni
          hostPath:
            path: /etc/cni/net.d
        - name: flannel-cfg
          configMap:
            name: kube-flannel-cfg

The nodes now report Ready:

kubectl get node
NAME          STATUS   ROLES    AGE   VERSION
k8s-master1   Ready    <none>   39h   v1.18.2
k8s-master2   Ready    <none>   38h   v1.18.2
k8s-master3   Ready    <none>   38h   v1.18.2
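
To confirm the overlay itself is up, check that one flannel pod runs per node (using the app label from the manifest above):

kubectl -n kube-system get pods -l app=flannel -o wide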

Deploy coredns

git clone https://github.com/coredns/deployment.git
cd deployment/kubernetes

Edit deploy.sh to pin the cluster DNS IP:

if [[ -z $CLUSTER_DNS_IP ]]; then
   # Default IP to kube-dns IP
   # CLUSTER_DNS_IP=$(kubectl get service --namespace kube-system kube-dns -o jsonpath="{.spec.clusterIP}")
   CLUSTER_DNS_IP=10.10.0.2
fi
./deploy.sh | kubectl apply -f -

./deploy.sh
apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:coredns
rules:
- apiGroups:
  - ""
  resources:
  - endpoints
  - services
  - pods
  - namespaces
  verbs:
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
data:
  Corefile: |
    .:53 {
        errors
        health {
          lameduck 5s
        }
        ready
        kubernetes cluster.local in-addr.arpa ip6.arpa {
          fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        forward . /etc/resolv.conf {
          max_concurrent 1000
        }
        cache 30
        loop
        reload
        loadbalance
    }
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/name: "CoreDNS"
spec:
  # replicas: not specified here:
  # 1. Default is 1.
  # 2. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
    spec:
      priorityClassName: system-cluster-critical
      serviceAccountName: coredns
      tolerations:
        - key: "CriticalAddonsOnly"
          operator: "Exists"
      nodeSelector:
        kubernetes.io/os: linux
      affinity:
         podAntiAffinity:
           preferredDuringSchedulingIgnoredDuringExecution:
           - weight: 100
             podAffinityTerm:
               labelSelector:
                 matchExpressions:
                   - key: k8s-app
                     operator: In
                     values: ["kube-dns"]
               topologyKey: kubernetes.io/hostname
      containers:
      - name: coredns
        image: coredns/coredns:1.7.0
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        args: [ "-conf", "/etc/coredns/Corefile" ]
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
          readOnly: true
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: 9153
          name: metrics
          protocol: TCP
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            add:
            - NET_BIND_SERVICE
            drop:
            - all
          readOnlyRootFilesystem: true
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /ready
            port: 8181
            scheme: HTTP
      dnsPolicy: Default
      volumes:
        - name: config-volume
          configMap:
            name: coredns
            items:
            - key: Corefile
              path: Corefile
---
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  annotations:
    prometheus.io/port: "9153"
    prometheus.io/scrape: "true"
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "CoreDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: 10.10.0.2
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
  - name: metrics
    port: 9153
    protocol: TCP
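
A quick way to verify cluster DNS, using a throwaway busybox pod (the image tag is just a known-good example):

kubectl run -it --rm dns-test --image=busybox:1.28 --restart=Never -- nslookup kubernetes.default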

Deploy the cluster monitoring service metrics-server

git clone https://github.com/kubernetes-sigs/metrics-server.git -b v0.3.6
cd metrics-server/deploy/1.8+

Modify metrics-server-deployment.yaml

apiVersion: v1
kind: ServiceAccount
metadata:
  name: metrics-server
  namespace: kube-system
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: metrics-server
  namespace: kube-system
  labels:
    k8s-app: metrics-server
spec:
  selector:
    matchLabels:
      k8s-app: metrics-server
  template:
    metadata:
      name: metrics-server
      labels:
        k8s-app: metrics-server
    spec:
      serviceAccountName: metrics-server
      volumes:
      # mount in tmp so we can safely use from-scratch images and/or read-only containers
      - name: tmp-dir
        emptyDir: {}
      containers:
      - name: metrics-server
        image: jusene/metrics-server-amd64:v0.3.6
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            cpu: 400m
            memory: 1024Mi
          requests:
            cpu: 50m
            memory: 50Mi
        command:
        - /metrics-server
        - --kubelet-insecure-tls
        - --kubelet-preferred-address-types=InternalIP
        volumeMounts:
        - name: tmp-dir
          mountPath: /tmp
  • aggregated-metrics-reader.yaml

kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: system:aggregated-metrics-reader
  labels:
    rbac.authorization.k8s.io/aggregate-to-view: "true"
    rbac.authorization.k8s.io/aggregate-to-edit: "true"
    rbac.authorization.k8s.io/aggregate-to-admin: "true"
rules:
- apiGroups: ["metrics.k8s.io"]
  resources: ["pods", "nodes"]
  verbs: ["get", "list", "watch"]
  • auth-reader.yaml

---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
  name: metrics-server-auth-reader
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
  • resource-reader.yaml

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: system:metrics-server
rules:
- apiGroups:
  - ""
  resources:
  - pods
  - nodes
  - nodes/stats
  - namespaces
  verbs:
  - get
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: system:metrics-server
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:metrics-server
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
  • auth-delegator.yaml

---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: metrics-server:system:auth-delegator
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:auth-delegator
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
  • metrics-apiservice.yaml

---
apiVersion: apiregistration.k8s.io/v1beta1
kind: APIService
metadata:
  name: v1beta1.metrics.k8s.io
spec:
  service:
    name: metrics-server
    namespace: kube-system
  group: metrics.k8s.io
  version: v1beta1
  insecureSkipTLSVerify: true
  groupPriorityMinimum: 100
  versionPriority: 100
  • metrics-server-service.yaml

---
apiVersion: v1
kind: Service
metadata:
  name: metrics-server
  namespace: kube-system
  labels:
    kubernetes.io/name: "Metrics-server"
    kubernetes.io/cluster-service: "true"
spec:
  selector:
    k8s-app: metrics-server
  ports:
  - port: 443
    protocol: TCP
    targetPort: 443

Apply the manifests

kubectl apply -f .

kubectl top node
NAME          CPU(cores)   CPU%   MEMORY(bytes)   MEMORY%
k8s-master1   144m         7%     1163Mi          39%
k8s-master2   152m         7%     1145Mi          38%
k8s-master3   133m         6%     1077Mi          36%
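
Pod-level metrics should come up a minute or so later and can be checked the same way:

kubectl top pod -n kube-system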

Deploy nginx-ingress

apiVersion: v1
kind: Namespace
metadata:
  name: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx

---

kind: ConfigMap
apiVersion: v1
metadata:
  name: nginx-configuration
  namespace: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx

---
kind: ConfigMap
apiVersion: v1
metadata:
  name: tcp-services
  namespace: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx

---
kind: ConfigMap
apiVersion: v1
metadata:
  name: udp-services
  namespace: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx

---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nginx-ingress-serviceaccount
  namespace: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx

---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: nginx-ingress-clusterrole
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
rules:
  - apiGroups:
      - ""
    resources:
      - configmaps
      - endpoints
      - nodes
      - pods
      - secrets
    verbs:
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - nodes
    verbs:
      - get
  - apiGroups:
      - ""
    resources:
      - services
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - events
    verbs:
      - create
      - patch
  - apiGroups:
      - "extensions"
      - "networking.k8s.io"
    resources:
      - ingresses
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - "extensions"
      - "networking.k8s.io"
    resources:
      - ingresses/status
    verbs:
      - update

---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role
metadata:
  name: nginx-ingress-role
  namespace: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
rules:
  - apiGroups:
      - ""
    resources:
      - configmaps
      - pods
      - secrets
      - namespaces
    verbs:
      - get
  - apiGroups:
      - ""
    resources:
      - configmaps
    resourceNames:
      # Defaults to "<election-id>-<ingress-class>"
      # Here: "<ingress-controller-leader>-<nginx>"
      # This has to be adapted if you change either parameter
      # when launching the nginx-ingress-controller.
      - "ingress-controller-leader-nginx"
    verbs:
      - get
      - update
  - apiGroups:
      - ""
    resources:
      - configmaps
    verbs:
      - create
  - apiGroups:
      - ""
    resources:
      - endpoints
    verbs:
      - get

---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
  name: nginx-ingress-role-nisa-binding
  namespace: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: nginx-ingress-role
subjects:
  - kind: ServiceAccount
    name: nginx-ingress-serviceaccount
    namespace: ingress-nginx

---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: nginx-ingress-clusterrole-nisa-binding
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: nginx-ingress-clusterrole
subjects:
  - kind: ServiceAccount
    name: nginx-ingress-serviceaccount
    namespace: ingress-nginx

---

apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-ingress-controller
  namespace: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: ingress-nginx
      app.kubernetes.io/part-of: ingress-nginx
  template:
    metadata:
      labels:
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/part-of: ingress-nginx
      annotations:
        prometheus.io/port: "10254"
        prometheus.io/scrape: "true"
    spec:
      hostNetwork: true
      # wait up to five minutes for the drain of connections
      terminationGracePeriodSeconds: 300
      serviceAccountName: nginx-ingress-serviceaccount
      containers:
        - name: nginx-ingress-controller
          image: jusene/nginx-ingress-controller:0.26.1
          args:
            - /nginx-ingress-controller
            - --configmap=$(POD_NAMESPACE)/nginx-configuration
            - --tcp-services-configmap=$(POD_NAMESPACE)/tcp-services
            - --udp-services-configmap=$(POD_NAMESPACE)/udp-services
            - --publish-service=$(POD_NAMESPACE)/ingress-nginx
            - --annotations-prefix=nginx.ingress.kubernetes.io
          securityContext:
            allowPrivilegeEscalation: true
            capabilities:
              drop:
                - ALL
              add:
                - NET_BIND_SERVICE
            # www-data -> 33
            runAsUser: 33
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          ports:
            - name: http
              containerPort: 80
            - name: https
              containerPort: 443
          livenessProbe:
            failureThreshold: 3
            httpGet:
              path: /healthz
              port: 10254
              scheme: HTTP
            initialDelaySeconds: 10
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 10
          readinessProbe:
            failureThreshold: 3
            httpGet:
              path: /healthz
              port: 10254
              scheme: HTTP
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 10
          lifecycle:
            preStop:
              exec:
                command:
                  - /wait-shutdown
---
apiVersion: v1
kind: Service
metadata:
  name: ingress-nginx
  namespace: ingress-nginx
spec:
  type: ClusterIP
  ports:
  - name: http
    port: 80
    targetPort: 80
    protocol: TCP
  - name: https
    port: 443
    targetPort: 443
    protocol: TCP
  selector:
    app.kubernetes.io/name: ingress-nginx
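
Since the controller runs with hostNetwork: true and a single replica, it binds ports 80/443 directly on whichever node it is scheduled to. A quick smoke test is to point a minimal Ingress at any existing Service and curl the controller with a matching Host header; the whoami Service and test hostname below are hypothetical stand-ins, not part of the manifest above:

apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: whoami-test            # hypothetical test resource
  namespace: default
spec:
  rules:
  - host: whoami.zjhw.com      # hypothetical test host
    http:
      paths:
      - path: /
        backend:
          serviceName: whoami  # any existing Service in the namespace
          servicePort: 80

Then find the node the controller pod landed on and curl it directly:

kubectl -n ingress-nginx get pod -o wide
curl -H 'Host: whoami.zjhw.com' http://<controller-node-ip>/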

Deploy kubernetes dashboard

# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

apiVersion: v1
kind: Namespace
metadata:
  name: kubernetes-dashboard

---

apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard

---

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  ports:
    - port: 443
      targetPort: 8443
  selector:
    k8s-app: kubernetes-dashboard
---
kind: Ingress
apiVersion: extensions/v1beta1
metadata:
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
  annotations:
    nginx.ingress.kubernetes.io/ssl-redirect: "true"
    nginx.ingress.kubernetes.io/rewrite-target: /
    nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
spec:
  tls:
  - hosts:
    - dashboard.zjhw.com
    secretName: dashboard-tls

  rules:
  - host: dashboard.zjhw.com
    http:
      paths:
      - path: /
        backend:
          serviceName: kubernetes-dashboard
          servicePort: 443

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-certs
  namespace: kubernetes-dashboard
type: Opaque

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-csrf
  namespace: kubernetes-dashboard
type: Opaque
data:
  csrf: ""

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-key-holder
  namespace: kubernetes-dashboard
type: Opaque

---

kind: ConfigMap
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-settings
  namespace: kubernetes-dashboard

---

kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
rules:
  # Allow Dashboard to get, update and delete Dashboard exclusive secrets.
  - apiGroups: [""]
    resources: ["secrets"]
    resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
    verbs: ["get", "update", "delete"]
    # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
  - apiGroups: [""]
    resources: ["configmaps"]
    resourceNames: ["kubernetes-dashboard-settings"]
    verbs: ["get", "update"]
    # Allow Dashboard to get metrics.
  - apiGroups: [""]
    resources: ["services"]
    resourceNames: ["heapster", "dashboard-metrics-scraper"]
    verbs: ["proxy"]
  - apiGroups: [""]
    resources: ["services/proxy"]
    resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
    verbs: ["get"]

---

kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
rules:
  # Allow Metrics Scraper to get metrics from the Metrics server
  - apiGroups: ["metrics.k8s.io"]
    resources: ["pods", "nodes"]
    verbs: ["get", "list", "watch"]

---

apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard

---

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard

---

kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
    spec:
      containers:
        - name: kubernetes-dashboard
          image: harbor.zjhw.com/library/dashboard:v2.0.0
          imagePullPolicy: Always
          ports:
            - containerPort: 8443
              protocol: TCP
          args:
            - --auto-generate-certificates
            - --namespace=kubernetes-dashboard
            # Uncomment the following line to manually specify Kubernetes API server Host
            # If not specified, Dashboard will attempt to auto discover the API server and connect
            # to it. Uncomment only if the default does not work.
            # - --apiserver-host=http://my-address:port
          volumeMounts:
            - name: kubernetes-dashboard-certs
              mountPath: /certs
              # Create on-disk volume to store exec logs
            - mountPath: /tmp
              name: tmp-volume
          livenessProbe:
            httpGet:
              scheme: HTTPS
              path: /
              port: 8443
            initialDelaySeconds: 30
            timeoutSeconds: 30
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsUser: 1001
            runAsGroup: 2001
      volumes:
        - name: kubernetes-dashboard-certs
          secret:
            secretName: kubernetes-dashboard-certs
        - name: tmp-volume
          emptyDir: {}
      serviceAccountName: kubernetes-dashboard
      nodeSelector:
        beta.kubernetes.io/arch: amd64
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule

---

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: dashboard-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kubernetes-dashboard
spec:
  ports:
    - port: 8000
      targetPort: 8000
  selector:
    k8s-app: dashboard-metrics-scraper

---

kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: dashboard-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: dashboard-metrics-scraper
  template:
    metadata:
      labels:
        k8s-app: dashboard-metrics-scraper
      annotations:
        seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
    spec:
      containers:
        - name: dashboard-metrics-scraper
          image: harbor.zjhw.com/library/metrics-scraper:v1.0.4
          ports:
            - containerPort: 8000
              protocol: TCP
          livenessProbe:
            httpGet:
              scheme: HTTP
              path: /
              port: 8000
            initialDelaySeconds: 30
            timeoutSeconds: 30
          volumeMounts:
          - mountPath: /tmp
            name: tmp-volume
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsUser: 1001
            runAsGroup: 2001
      serviceAccountName: kubernetes-dashboard
      nodeSelector:
        beta.kubernetes.io/arch: amd64
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
      volumes:
        - name: tmp-volume
          emptyDir: {}
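
Note that the Ingress above references a dashboard-tls secret that this manifest does not create; it must exist in the kubernetes-dashboard namespace before TLS termination works. For a lab cluster a self-signed certificate is enough (the openssl subject below is just an example):

openssl req -x509 -nodes -days 365 -newkey rsa:2048 \
  -keyout dashboard.key -out dashboard.crt \
  -subj "/CN=dashboard.zjhw.com"
kubectl -n kubernetes-dashboard create secret tls dashboard-tls \
  --cert=dashboard.crt --key=dashboard.key

Dashboard v2.0.0 logs in with a bearer token. One common pattern, not part of the manifest above, is a dashboard-admin ServiceAccount bound to cluster-admin (broad rights, so only for trusted operators); on 1.18 the token lives in the ServiceAccount's secret:

kubectl create serviceaccount dashboard-admin -n kubernetes-dashboard
kubectl create clusterrolebinding dashboard-admin \
  --clusterrole=cluster-admin \
  --serviceaccount=kubernetes-dashboard:dashboard-admin
kubectl -n kubernetes-dashboard describe secret \
  $(kubectl -n kubernetes-dashboard get secret | grep dashboard-admin | awk '{print $1}')

Paste the printed token into the login screen at https://dashboard.zjhw.com/.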