CentOS 7: Binary Deployment of a Kubernetes (v1.19.0) High-Availability Cluster
I. Planning

1. master/etcd cluster nodes (3):

192.168.21.30(master)
192.168.21.31(node1)
192.168.21.32(node2)

2. Worker (node) nodes (3):

192.168.21.31(node1)
192.168.21.32(node2)
192.168.21.33(node3)

3. haproxy + keepalived high-availability nodes (2):

192.168.21.30(master)
192.168.21.31(node1)

4. harbor node (1):

192.168.21.34(node4)

II. Deploying the etcd high-availability cluster

1. Create the CA certificate for securing etcd and Kubernetes connections

Issue a self-signed CA certificate with openssl and place it in /etc/kubernetes/pki:

 openssl genrsa -out ca.key 2048
 ​
 openssl req -x509 -new -nodes -key ca.key -subj "/CN=192.168.21.30" -days 36500 -out ca.crt
 ​
 [root@master pki]# pwd
 /etc/kubernetes/pki
 [root@master pki]# ls
 ca.crt ca.key

2. Create the etcd server and client certificates

  • Create the etcd_ssl.cnf OpenSSL configuration file (the CA root certificate, ca.key and ca.crt, was already created above); it lists the etcd node IPs as subjectAltName entries:

 vim etcd_ssl.cnf
 ​
 [ req ]
 req_extensions = v3_req
 distinguished_name = req_distinguished_name
 ​
 [ req_distinguished_name ]
 ​
 [ v3_req ]
 basicConstraints = CA:FALSE
 keyUsage = nonRepudiation, digitalSignature, keyEncipherment
 subjectAltName = @alt_names
 ​
 [ alt_names ]
 IP.1 = 192.168.21.30
 IP.2 = 192.168.21.31
 IP.3 = 192.168.21.32
  • Create the etcd server certificate
    Use openssl to create the server certificate and key, etcd_server.key and etcd_server.crt, stored under /etc/etcd/pki:

 [root@master pki]# openssl genrsa -out etcd_server.key 2048
 Generating RSA private key, 2048 bit long modulus
 .....................................................................................
 .................+++.............+++
 e is 65537 (0x10001)
 ​
 [root@master pki]# openssl req -new -key etcd_server.key -config etcd_ssl.cnf -subj "/CN=etcd-server" -out etcd_server.csr
 ​
 [root@master pki]# openssl x509 -req -in etcd_server.csr -CA /etc/kubernetes/pki/ca.crt -CAkey /etc/kubernetes/pki/ca.key -CAcreateserial -days 36500 -extensions v3_req -extfile etcd_ssl.cnf -out etcd_server.crt
 Signature ok
 subject=/CN=etcd-server
 Getting CA Private Key
  • Create the etcd client certificate
    Use openssl to create the client certificate and key, etcd_client.key and etcd_client.crt, also stored under /etc/etcd/pki:

 [root@master pki]# openssl genrsa -out etcd_client.key 2048
 Generating RSA private key, 2048 bit long modulus
 .............................................+++
 ..............................................................................+++
 e is 65537 (0x10001)
 [root@master pki]# openssl req -new -key etcd_client.key -config etcd_ssl.cnf -subj "/CN=etcd-client" -out etcd_client.csr
 ​
 [root@master pki]# openssl x509 -req -in etcd_client.csr -CA /etc/kubernetes/pki/ca.crt -CAkey /etc/kubernetes/pki/ca.key -CAcreateserial -days 36500 -extensions v3_req -extfile etcd_ssl.cnf -out etcd_client.crt
 Signature ok
 subject=/CN=etcd-client
 Getting CA Private Key

For the other two nodes, simply copy these certificates from the first node.
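
A minimal sketch of doing this with scp (assuming root SSH access from the first node; adjust the paths if your layout differs):

 for host in 192.168.21.31 192.168.21.32; do
   ssh root@$host "mkdir -p /etc/etcd/pki /etc/kubernetes/pki"
   scp /etc/kubernetes/pki/ca.crt /etc/kubernetes/pki/ca.key root@$host:/etc/kubernetes/pki/
   scp /etc/etcd/pki/etcd_server.* /etc/etcd/pki/etcd_client.* root@$host:/etc/etcd/pki/
 done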

3. Configure etcd

Edit /etc/etcd/etcd.conf using environment variables. The example below is for one node; on the other nodes, change the IP addresses (and ETCD_NAME) accordingly.

 ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
 ETCD_LISTEN_PEER_URLS="https://192.168.21.30:2380"
 ETCD_LISTEN_CLIENT_URLS="https://192.168.21.30:2379"
 ETCD_NAME="etcd1"
 ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.21.30:2380"
 ETCD_ADVERTISE_CLIENT_URLS="https://192.168.21.30:2379"
 ETCD_INITIAL_CLUSTER="etcd1=https://192.168.21.30:2380,etcd2=https://192.168.21.31:23
 80,etcd3=https://192.168.21.32:2380"
 ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
 ETCD_INITIAL_CLUSTER_STATE="new"
 ETCD_CERT_FILE="/etc/etcd/pki/etcd_server.crt"
 ETCD_KEY_FILE="/etc/etcd/pki/etcd_server.key"
 ETCD_CLIENT_CERT_AUTH="true"
 ETCD_TRUSTED_CA_FILE="/etc/kubernetes/pki/ca.crt"
 ETCD_PEER_CERT_FILE="/etc/etcd/pki/etcd_server.crt"
 ETCD_PEER_KEY_FILE="/etc/etcd/pki/etcd_server.key"
 ETCD_PEER_TRUSTED_CA_FILE="/etc/kubernetes/pki/ca.crt"
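
This environment-variable style assumes an etcd systemd unit that sources /etc/etcd/etcd.conf; the unit shipped with the CentOS 7 etcd RPM already does so. If you write the unit by hand, a minimal sketch would look roughly like this:

 [Unit]
 Description=Etcd Server
 After=network-online.target
 ​
 [Service]
 Type=notify
 WorkingDirectory=/var/lib/etcd/
 EnvironmentFile=-/etc/etcd/etcd.conf
 ExecStart=/usr/bin/etcd
 Restart=always
 ​
 [Install]
 WantedBy=multi-user.target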

Start etcd and enable it at boot:

 systemctl restart etcd && systemctl enable etcd

Verify that the etcd cluster is healthy:

 etcdctl --ca-file=/etc/kubernetes/pki/ca.crt --cert-file=/etc/etcd/pki/etcd_client.crt \
 --key-file=/etc/etcd/pki/etcd_client.key \
 --endpoints=https://192.168.21.30:2379,https://192.168.21.31:2379,https://192.168.21.32:2379 cluster-health
 member a5753ed960575bb4 is healthy: got healthy result from https://192.168.21.31:2379
 member ca2a47d444bac4dd is healthy: got healthy result from https://192.168.21.30:2379
 member d85cddbd7165b028 is healthy: got healthy result from https://192.168.21.32:2379
 cluster is healthy
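
The flags above use the etcd v2 API. If your etcdctl defaults to the v3 API, a roughly equivalent health check with the same certificates would be:

 ETCDCTL_API=3 etcdctl \
   --cacert=/etc/kubernetes/pki/ca.crt \
   --cert=/etc/etcd/pki/etcd_client.crt \
   --key=/etc/etcd/pki/etcd_client.key \
   --endpoints=https://192.168.21.30:2379,https://192.168.21.31:2379,https://192.168.21.32:2379 \
   endpoint health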

III. Deploying the Kubernetes master HA cluster (v1.19.0)

1. Download the server binary package and copy the executables to /usr/bin

Download URL: https://dl.k8s.io/v1.19.0/kubernetes-server-linux-amd64.tar.gz
Extract kubernetes-server-linux-amd64.tar.gz and copy the executables from its bin directory to /usr/bin/:

 [root@master k8s-1.19.0]# ls
 kubernetes-client-linux-amd64.tar.gz kubernetes-server
 kubernetes-node-linux-amd64.tar.gz   kubernetes-server-linux-amd64.tar.gz
 ​
 [root@master bin]# pwd
 /root/k8s/k8s-1.19.0/kubernetes-server/server/bin
 [root@master bin]# ls
 1.txt                               kubectl
 apiextensions-apiserver             kubelet
 kubeadm                             kube-proxy
 kube-aggregator                     kube-proxy.docker_tag
 kube-apiserver                     kube-proxy.tar
 kube-apiserver.docker_tag           kube-scheduler
 kube-apiserver.tar                 kube-scheduler.docker_tag
 kube-controller-manager             kube-scheduler.tar
 kube-controller-manager.docker_tag mounter
 kube-controller-manager.tar
 [root@master bin]# find . -perm 755 -exec cp {} /usr/bin/ \;

2. Deploy the kube-apiserver service

  • Create the kube-apiserver server certificate

 [root@master pki]# pwd
 /etc/kubernetes/pki
 [root@master pki]# vim master_ssl.cnf
 [req]
 req_extensions = v3_req
 distinguished_name = req_distinguished_name
 ​
 [req_distinguished_name]
 ​
 [v3_req]
 basicConstraints = CA:FALSE
 keyUsage = nonRepudiation, digitalSignature, keyEncipherment
 subjectAltName = @alt_names
 ​
 [alt_names]
 DNS.1 = kubernetes
 DNS.2 = kubernetes.default
 DNS.3 = kubernetes.default.svc
 DNS.4 = kubernetes.default.svc.cluster.local
 DNS.5 = master
 DNS.6 = node1
 DNS.7 = node2
 DNS.8 = node3
 DNS.9 = node4
 IP.1 = 172.16.0.100
 IP.2 = 192.168.21.30
 IP.3 = 192.168.21.31
 IP.4 = 192.168.21.32
 IP.5 = 192.168.21.33
 IP.6 = 192.168.21.34
 IP.7 = 192.168.21.35
 ​
 [root@master pki]# openssl genrsa -out apiserver.key 2048
 Generating RSA private key, 2048 bit long modulus
 ...................................+++
 ...............................+++
 e is 65537 (0x10001)
 ​
 [root@master pki]# openssl req -new -key apiserver.key -config master_ssl.cnf -subj "/CN=192.168.21.30" -out apiserver.csr
 ​
 [root@master pki]# openssl x509 -req -in apiserver.csr -CA ca.crt -CAkey ca.key -CAcreateserial -days 36500 -extensions v3_req -extfile master_ssl.cnf -out apiserver.crt
 Signature ok
 subject=/CN=192.168.21.30
 Getting CA Private Key
  • Create the systemd unit

 [root@master pki]# vim /usr/lib/systemd/system/kube-apiserver.service
 ​
 [Unit]
 Description=kubernetes API Server
 Documentation=https://github.com/kubernetes/kubernetes
 ​
 [Service]
 EnvironmentFile=/etc/kubernetes/apiserver.conf
 ExecStart=/usr/bin/kube-apiserver $KUBE_API_ARGS
 Restart=always
 ​
 [Install]
 WantedBy=multi-user.target
 ​
 Create the configuration file /etc/kubernetes/apiserver.conf:
 [root@master pki]# vim /etc/kubernetes/apiserver.conf
 KUBE_API_ARGS="--insecure-port=0 \
 --secure-port=6443 \
 --tls-cert-file=/etc/kubernetes/pki/apiserver.crt \
 --tls-private-key-file=/etc/kubernetes/pki/apiserver.key \
 --client-ca-file=/etc/kubernetes/pki/ca.crt \
 --apiserver-count=3 --endpoint-reconciler-type=master-count \
 --etcd-servers=https://192.168.21.30:2379,https://192.168.21.31:2379,https://192.168.21.32:2379 \
 --etcd-cafile=/etc/kubernetes/pki/ca.crt \
 --etcd-certfile=/etc/etcd/pki/etcd_client.crt \
 --etcd-keyfile=/etc/etcd/pki/etcd_client.key \
 --service-cluster-ip-range=169.169.0.0/16 \
 --service-node-port-range=30000-32767 \
 --allow-privileged=true \
 --logtostderr=false \
 --log-dir=/var/log/kubernetes --v=0"
  • Start kube-apiserver and enable it at boot

 systemctl start kube-apiserver.service && systemctl enable kube-apiserver.service
 ​
 [root@master k8s]# netstat -an |grep 6443
 tcp6       0      0 :::6443                 :::*                   LISTEN     
 tcp6       0      0 ::1:52844               ::1:6443               ESTABLISHED
 tcp6       0      0 ::1:6443               ::1:52844               ESTABLISHED
  • Create the client certificate

 [root@master pki]# openssl genrsa -out client.key 2048
 Generating RSA private key, 2048 bit long modulus
 ....+++
 .........................................+++
 e is 65537 (0x10001)
 ​
 [root@master pki]# openssl req -new -key client.key -subj "/CN=admin" -out client.csr
 ​
 [root@master pki]# openssl x509 -req -in client.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out client.crt -days 36500
 Signature ok
 subject=/CN=admin
 Getting CA Private Key
  • Create the kubeconfig file that clients use to connect to kube-apiserver

 [root@master kubernetes]# pwd
 /etc/kubernetes
 [root@master kubernetes]# vim kubeconfig
 apiVersion: v1
 kind: Config
 clusters:
 - name: default
   cluster:
     server: https://192.168.21.35:9443
     certificate-authority: /etc/kubernetes/pki/ca.crt
 users:
 - name: admin
   user:
     client-certificate: /etc/kubernetes/pki/client.crt
     client-key: /etc/kubernetes/pki/client.key
 contexts:
 - context:
     cluster: default
     user: admin
   name: default
 current-context: default
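
The server address in this kubeconfig is the VIP and haproxy port configured in step 5 below, so it only works once the load balancer is up. To test the client certificate against this node directly in the meantime, you can override the server address, for example:

 kubectl --kubeconfig=/etc/kubernetes/kubeconfig --server=https://192.168.21.30:6443 version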

3. Deploy the kube-controller-manager service

  • Create the kube-controller-manager systemd unit

 [root@master kubernetes]# cat /usr/lib/systemd/system/kube-controller-manager.service
 ​
 [Unit]
 Description=Kubernetes Controller Manager
 Documentation=https://github.com/kubernetes/kubernetes
 ​
 [Service]
 EnvironmentFile=/etc/kubernetes/controller-manager.conf
 ExecStart=/usr/bin/kube-controller-manager $KUBE_CONTROLLER_MANAGER_ARGS
 Restart=always
 ​
 [Install]
 WantedBy=multi-user.target
  • Create the controller-manager.conf configuration file

 [root@master kubernetes]# cat controller-manager.conf 
 KUBE_CONTROLLER_MANAGER_ARGS="--kubeconfig=/etc/kubernetes/kubeconfig \
 --leader-elect=true \
 --service-cluster-ip-range=169.169.0.0/16 \
 --service-account-private-key-file=/etc/kubernetes/pki/apiserver.key \
 --root-ca-file=/etc/kubernetes/pki/ca.crt \
 --log-dir=/var/log/kubernetes --logtostderr=false --v=0"
  • Start kube-controller-manager and enable it at boot

 [root@master kubernetes]# systemctl start kube-controller-manager.service && systemctl enable kube-controller-manager.service
 [root@master kubernetes]# ps aux |grep kube-controller
 root     16451  6.1  1.8 810028 72476 ?       Ssl  09:50   0:01 /usr/bin/kube-controller-manager --kubeconfig=/etc/kubernetes/kubeconfig --leader-elect=true --service-cluster-ip-range=169.169.0.0/16 --service-account-private-key-file=/etc/kubernetes/pki/apiserver.key --root-ca-file=/etc/kubernetes/pki/ca.crt --log-dir=/var/log/kubernetes --logtostderr=false --v=0

4. Deploy the kube-scheduler service

  • Create the kube-scheduler systemd unit

 [root@master kubernetes]# vim /usr/lib/systemd/system/kube-scheduler.service
 ​
 [Unit]
 Description=Kubernetes Scheduler
 Documentation=https://github.com/kubernetes/kubernetes
 ​
 [Service]
 EnvironmentFile=/etc/kubernetes/scheduler.conf
 ExecStart=/usr/bin/kube-scheduler $KUBE_SCHEDULER_ARGS
 Restart=always
 ​
 [Install]
 WantedBy=multi-user.target
  • Create the scheduler.conf configuration file

 [root@master kubernetes]# vim scheduler.conf
 [root@master kubernetes]# cat scheduler.conf 
 KUBE_SCHEDULER_ARGS="--kubeconfig=/etc/kubernetes/kubeconfig \
 --leader-elect=true \
 --logtostderr=false --log-dir=/var/log/kubernetes --v=0"
  • Start kube-scheduler.service and enable it at boot

 [root@master kubernetes]# systemctl start kube-scheduler.service && systemctl enable kube-scheduler.service
 Created symlink from /etc/systemd/system/multi-user.target.wants/kube-scheduler.service to /usr/lib/systemd/system/kube-scheduler.service.
 [root@master kubernetes]# ps aux |grep kube-sch
 root     17369 10.1  1.1 746396 44876 ?       Ssl  10:04   0:01 /usr/bin/kube-scheduler --kubeconfig=/etc/kubernetes/kubeconfig --leader-elect=true --logtostderr=false --log-dir=/var/log/kubernetes --v=0
 root     17407  0.0  0.0 112820  2256 pts/0   R+   10:04   0:00 grep --color=auto kube-sch
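
With kube-apiserver, kube-controller-manager, and kube-scheduler all running, a quick sanity check (componentstatuses is deprecated but still available in v1.19) is, for example:

 kubectl --kubeconfig=/etc/kubernetes/kubeconfig --server=https://192.168.21.30:6443 get componentstatuses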

5. Deploy a highly available load balancer with haproxy and keepalived

To avoid a single point of failure, two hosts form the load-balancer pair; this example uses 192.168.21.30 and 192.168.21.31, with the VIP 192.168.21.35.

  • Install haproxy and keepalived

 [root@master kubernetes]# yum install haproxy
 [root@master kubernetes]# yum install keepalived
  • Configure haproxy

 [root@master haproxy]# cat haproxy.cfg |grep -Ev "^#" |grep -Ev "^*#"
 ​
 global
    log         127.0.0.1 local2
 ​
     chroot     /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     4096
    user       haproxy
    group       haproxy
    daemon
 ​
    stats socket /var/lib/haproxy/stats
 ​
 defaults
    mode                   http
    log                     global
    option                 httplog
    option                 dontlognull
    option http-server-close
    option forwardfor       except 127.0.0.0/8
    option                 redispatch
    retries                 3
    timeout http-request   10s
    timeout queue           1m
    timeout connect         10s
    timeout client         1m
    timeout server         1m
    timeout http-keep-alive 10s
    timeout check           10s
    maxconn                 3000
 ​
 frontend kube-apiserver 
     mode                 tcp
    bind                 *:9443
    option               tcplog
    default_backend     kube-apiserver
 ​
 listen stats
    mode           http
    bind           *:8888
    stats auth     admin:password
    stats refresh 5s
    stats realm   HAProxy\ Statistics
    stats uri     /stats
    log            127.0.0.1 local3 err
 backend kube-apiserver
    mode       tcp
    balance     roundrobin
    server master 192.168.21.30:6443 check
    server node1 192.168.21.31:6443 check
     server node2 192.168.21.32:6443 check
  • Start haproxy and enable it at boot

 [root@master ~]# systemctl start haproxy.service && systemctl enable haproxy.service
 Created symlink from /etc/systemd/system/multi-user.target.wants/haproxy.service to /usr/lib/systemd/system/haproxy.service.

Verify haproxy:
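
Based on the listen stats and frontend sections above, two simple checks are the statistics page on port 8888 and the TCP frontend on port 9443; the latter should return a 401 JSON response from kube-apiserver because no client certificate is presented:

 curl -u admin:password http://192.168.21.30:8888/stats
 curl -k https://192.168.21.30:9443/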

  • Configure keepalived
    On the first node:

 
 [root@master keepalived]# cat keepalived.conf
 ! Configuration File for keepalived
 ​
 global_defs {
    router_id LVS_1
 }
 vrrp_script checkhaproxy {
  script "/usr/bin/check-haproxy.sh"
  interval 2
  weight -30
 }
 vrrp_instance VI_1 {
    state MASTER
    interface eth0
    virtual_router_id 51
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass password
    }
    virtual_ipaddress {
         192.168.21.35/24 dev eth0
    }
    track_script {
      checkhaproxy
    }
 }
 [root@master keepalived]# cat /usr/bin/check-haproxy.sh
 #!/bin/bash
 count=`netstat -apn | grep 9443 | wc -l`
 if [ $count -gt 0 ]; then
   exit 0
 else
   exit 1
 fi
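
The script referenced by vrrp_script must exist and be executable on both load-balancer nodes, for example:

 chmod +x /usr/bin/check-haproxy.sh
 scp /usr/bin/check-haproxy.sh root@192.168.21.31:/usr/bin/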

 

On the second node:

 
 [root@node1 ~]# cat /etc/keepalived/keepalived.conf
 ! Configuration File for keepalived
 ​
 global_defs {
    router_id LVS_2
 }
 vrrp_script checkhaproxy {
  script "/usr/bin/check-haproxy.sh"
  interval 2
  weight -30
 }
 vrrp_instance VI_1 {
    state BACKUP
    interface eth0
    virtual_router_id 51
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass password
    }
    virtual_ipaddress {
         192.168.21.35/24 dev eth0
    }
    track_script {
      checkhaproxy
    }
 }

 

  • Start keepalived and enable it at boot

 [root@master keepalived]# systemctl start keepalived.service && systemctl enable keepalived.service
 Created symlink from /etc/systemd/system/multi-user.target.wants/keepalived.service to /usr/lib/systemd/system/keepalived.service.
 [root@master keepalived]# ps aux |grep keep
 root     22629  0.0  0.0 123008  2108 ?       Ss   11:32   0:00 /usr/sbin/keepalived -D
 root     22630  0.0  0.1 123008  5708 ?       S    11:32   0:00 /usr/sbin/keepalived -D
 root     22631  0.0  0.1 125132  5708 ?       S    11:32   0:00 /usr/sbin/keepalived -D
 root     22667  0.0  0.0 112820  2212 pts/0   S+   11:32   0:00 grep --color=auto keep
  • Verify keepalived

 [root@master keepalived]# curl -v -k https://192.168.21.35:9443
 * About to connect() to 192.168.21.35 port 9443 (#0)
 *   Trying 192.168.21.35...
 * Connected to 192.168.21.35 (192.168.21.35) port 9443 (#0)
 * Initializing NSS with certpath: sql:/etc/pki/nssdb
 * skipping SSL peer certificate verification
 * NSS: client certificate not found (nickname not specified)
 * SSL connection using TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384
 * Server certificate:
 * subject: CN=192.168.21.30
 * start date: Jul 20 07:05:48 2021 GMT
 * expire date: Jun 26 07:05:48 2121 GMT
 * common name: 192.168.21.30
 * issuer: CN=192.168.21.30
 > GET / HTTP/1.1
 > User-Agent: curl/7.29.0
 > Host: 192.168.21.35:9443
 > Accept: */*
 > 
 < HTTP/1.1 401 Unauthorized
 < Cache-Control: no-cache, private
 < Content-Type: application/json
 < Date: Wed, 21 Jul 2021 03:36:48 GMT
 < Content-Length: 165
 < 
 {
   "kind": "Status",
   "apiVersion": "v1",
   "metadata": {
     
  },
   "status": "Failure",
   "message": "Unauthorized",
   "reason": "Unauthorized",
   "code": 401
 * Connection #0 to host 192.168.21.35 left intact
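
You can also check which node currently holds the VIP, since keepalived binds it to eth0:

 ip addr show eth0 | grep 192.168.21.35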

IV. Deploying the flanneld network component

Install flanneld on all node (worker) hosts.

1. Download flannel

Download URL: https://github.com/flannel-io/flannel/releases

flannel-v0.14.0-linux-amd64.tar.gz

After extracting, copy flanneld and mk-docker-opts.sh to /usr/bin (on all node hosts):

 [root@node1 flannel]# pwd
 /root/k8s/flannel
 [root@node1 flannel]# ls
 flanneld mk-docker-opts.sh README.md
 [root@node1 flannel]# cp flanneld mk-docker-opts.sh /usr/bin/

2. Create the flanneld systemd unit

 [root@node1 flannel]# cat /usr/lib/systemd/system/flanneld.service 
 [Unit]
 Description=Kubernetes Network Plugin Flannel
 Documentation=https://flannel
 After=network-online.target network.target
 Before=docker.service
 ​
 [Service]
 Type=notify
 EnvironmentFile=/etc/sysconfig/flanneld.conf
 ExecStart=/usr/bin/flanneld --ip-masq $FLANNEL_OPTIONS
 ExecStartPost=/usr/bin/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/subnet.env
 Restart=on-failure
 ​
 [Install]
 WantedBy=multi-user.target

3. Create the flanneld.conf configuration file

 [root@node1 flannel]# cat /etc/sysconfig/flanneld.conf 
 ETCD_ENDPOINTS="https://192.168.21.30:2379,https://192.168.21.31:2379,https://192.168.21.32:2379"
 FLANNEL_OPTIONS="--etcd-endpoints=${ETCD_ENDPOINTS} \
 --etcd-cafile=/etc/kubernetes/pki/ca.crt \
 --etcd-certfile=/etc/kubernetes/pki/etcd_server.crt \
 --etcd-keyfile=/etc/kubernetes/pki/etcd_server.key \
 --etcd-prefix=/coreos.com/network \
 --iface=eth0"

4. Create the network configuration entry in etcd (run on a master node)

 etcdctl --endpoints https://192.168.21.30:2379,https://192.168.21.31:2379,https://192.168.21.32:2379 \
 --ca-file /etc/kubernetes/pki/ca.crt --cert-file /etc/kubernetes/pki/etcd_server.crt \
 --key-file /etc/kubernetes/pki/etcd_server.key \
 set /coreos.com/network/config '{"Network":"172.16.0.0/16","Backend":{"Type":"vxlan"}}'
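
To confirm the entry was written, read it back with the same v2-style flags:

 etcdctl --endpoints https://192.168.21.30:2379 \
 --ca-file /etc/kubernetes/pki/ca.crt --cert-file /etc/kubernetes/pki/etcd_server.crt \
 --key-file /etc/kubernetes/pki/etcd_server.key \
 get /coreos.com/network/config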

5. Start flanneld and enable it at boot

 [root@node1 flannel]# systemctl start flanneld.service && systemctl enable flanneld.service

6. Verify

 # ifconfig
 flannel.1: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1450
        inet 172.16.62.0 netmask 255.255.255.255 broadcast 172.16.62.0
        inet6 fe80::5898:9aff:fe32:56ab prefixlen 64 scopeid 0x20<link>
        ether 5a:98:9a:32:56:ab txqueuelen 0 (Ethernet)
        RX packets 3 bytes 252 (252.0 B)
        RX errors 0 dropped 0 overruns 0 frame 0
        TX packets 3 bytes 252 (252.0 B)
        TX errors 0 dropped 5 overruns 0 carrier 0 collisions 0
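
flanneld's ExecStartPost step writes the docker options derived from the leased subnet into /run/flannel/subnet.env, which docker.service consumes in the next section. The content should look roughly like the following (the values here are illustrative; the subnet differs per node):

 [root@node1 flannel]# cat /run/flannel/subnet.env
 DOCKER_NETWORK_OPTIONS=" --bip=172.16.62.1/24 --ip-masq=false --mtu=1450"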

V. Deploying the docker service

Install docker on all node hosts; this example installs it with yum.

1. docker-ce.repo

 [root@node1 flannel]# cat /etc/yum.repos.d/docker-ce.repo 
 [docker-ce-stable]
 name=Docker CE Stable - $basearch
 baseurl=https://mirrors.tuna.tsinghua.edu.cn/docker-ce/linux/centos/$releasever/$basearch/stable
 enabled=1
 gpgcheck=1
 gpgkey=https://mirrors.tuna.tsinghua.edu.cn/docker-ce/linux/centos/gpg
 ​
 [docker-ce-stable-debuginfo]
 name=Docker CE Stable - Debuginfo $basearch
 baseurl=https://mirrors.tuna.tsinghua.edu.cn/docker-ce/linux/centos/$releasever/debug-$basearch/stable
 enabled=0
 gpgcheck=1
 gpgkey=https://mirrors.tuna.tsinghua.edu.cn/docker-ce/linux/centos/gpg
 ​
 [docker-ce-stable-source]
 name=Docker CE Stable - Sources
 baseurl=https://mirrors.tuna.tsinghua.edu.cn/docker-ce/linux/centos/$releasever/source/stable
 enabled=0
 gpgcheck=1
 gpgkey=https://mirrors.tuna.tsinghua.edu.cn/docker-ce/linux/centos/gpg
 ​
 [docker-ce-test]
 name=Docker CE Test - $basearch
 baseurl=https://mirrors.tuna.tsinghua.edu.cn/docker-ce/linux/centos/$releasever/$basearch/test
 enabled=0
 gpgcheck=1
 gpgkey=https://mirrors.tuna.tsinghua.edu.cn/docker-ce/linux/centos/gpg
 ​
 [docker-ce-test-debuginfo]
 name=Docker CE Test - Debuginfo $basearch
 baseurl=https://mirrors.tuna.tsinghua.edu.cn/docker-ce/linux/centos/$releasever/debug-$basearch/test
 enabled=0
 gpgcheck=1
 gpgkey=https://mirrors.tuna.tsinghua.edu.cn/docker-ce/linux/centos/gpg
 ​
 [docker-ce-test-source]
 name=Docker CE Test - Sources
 baseurl=https://mirrors.tuna.tsinghua.edu.cn/docker-ce/linux/centos/$releasever/source/test
 enabled=0
 gpgcheck=1
 gpgkey=https://mirrors.tuna.tsinghua.edu.cn/docker-ce/linux/centos/gpg
 ​
 [docker-ce-nightly]
 name=Docker CE Nightly - $basearch
 baseurl=https://mirrors.tuna.tsinghua.edu.cn/docker-ce/linux/centos/$releasever/$basearch/nightly
 enabled=0
 gpgcheck=1
 gpgkey=https://mirrors.tuna.tsinghua.edu.cn/docker-ce/linux/centos/gpg
 ​
 [docker-ce-nightly-debuginfo]
 name=Docker CE Nightly - Debuginfo $basearch
 baseurl=https://mirrors.tuna.tsinghua.edu.cn/docker-ce/linux/centos/$releasever/debug-$basearch/nightly
 enabled=0
 gpgcheck=1
 gpgkey=https://mirrors.tuna.tsinghua.edu.cn/docker-ce/linux/centos/gpg
 ​
 [docker-ce-nightly-source]
 name=Docker CE Nightly - Sources
 baseurl=https://mirrors.tuna.tsinghua.edu.cn/docker-ce/linux/centos/$releasever/source/nightly
 enabled=0
 gpgcheck=1
 gpgkey=https://mirrors.tuna.tsinghua.edu.cn/docker-ce/linux/centos/gpg

2. Install docker

 yum install docker-ce

3. Configure docker.service

 [root@node1 flannel]# cat /usr/lib/systemd/system/docker.service
 [Unit]
 Description=Docker Application Container Engine
 Documentation=https://docs.docker.com
 After=network-online.target firewalld.service containerd.service
 Wants=network-online.target
 Requires=docker.socket containerd.service
 ​
 [Service]
 Type=notify
 # the default is not to use systemd for cgroups because the delegate issues still
 # exists and systemd currently does not support the cgroup feature set required
 # for containers run by docker
 #ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
 EnvironmentFile=/run/flannel/subnet.env
 ExecStart=/usr/bin/dockerd $DOCKER_NETWORK_OPTIONS
 ExecReload=/bin/kill -s HUP $MAINPID
 TimeoutSec=0
 RestartSec=2
 Restart=always
 ​
 # Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229.
 # Both the old, and new location are accepted by systemd 229 and up, so using the old location
 # to make them work for either version of systemd.
 StartLimitBurst=3
 ​
 # Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230.
 # Both the old, and new name are accepted by systemd 230 and up, so using the old name to make
 # this option work for either version of systemd.
 StartLimitInterval=60s
 ​
 # Having non-zero Limit*s causes performance problems due to accounting overhead
 # in the kernel. We recommend using cgroups to do container-local accounting.
 LimitNOFILE=infinity
 LimitNPROC=infinity
 LimitCORE=infinity
 ​
 # Comment TasksMax if your systemd version does not support it.
 # Only systemd 226 and above support this option.
 TasksMax=infinity
 ​
 # set delegate yes so that systemd does not reset the cgroups of docker containers
 Delegate=yes
 ​
 # kill only the docker process, not all processes in the cgroup
 KillMode=process
 OOMScoreAdjust=-500
 ​
 [Install]
 WantedBy=multi-user.target

4. Start docker and enable it at boot

 systemctl start docker && systemctl enable docker

5. Check the docker network

 [root@node1 flannel]# ifconfig
 docker0: flags=4099<UP,BROADCAST,MULTICAST> mtu 1500
        inet 172.16.62.1 netmask 255.255.255.0 broadcast 172.16.62.255
        inet6 fe80::42:e0ff:fe18:9fa prefixlen 64 scopeid 0x20<link>
        ether 02:42:e0:18:09:fa txqueuelen 0 (Ethernet)
        RX packets 5 bytes 308 (308.0 B)
        RX errors 0 dropped 0  overruns 0 frame 0
        TX packets 10 bytes 904 (904.0 B)
        TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
 ​
 eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
        inet 192.168.21.31 netmask 255.255.255.0 broadcast 192.168.21.255
        inet6 fe80::8d62:e14a:b27d:d478 prefixlen 64 scopeid 0x20<link>
        inet6 fe80::95d3:ac4d:e02d:e037 prefixlen 64 scopeid 0x20<link>
        inet6 fe80::3ce7:8033:b538:bb4e prefixlen 64 scopeid 0x20<link>
        ether 3a:66:dd:a0:4b:f2 txqueuelen 1000 (Ethernet)
        RX packets 1984756 bytes 277698227 (264.8 MiB)
        RX errors 0 dropped 0 overruns 0 frame 0
        TX packets 1962569 bytes 272656611 (260.0 MiB)
        TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
 ​
 flannel.1: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1450
        inet 172.16.62.0 netmask 255.255.255.255 broadcast 172.16.62.0
        inet6 fe80::5898:9aff:fe32:56ab prefixlen 64 scopeid 0x20<link>
        ether 5a:98:9a:32:56:ab txqueuelen 0 (Ethernet)
        RX packets 3 bytes 252 (252.0 B)
        RX errors 0 dropped 0 overruns 0 frame 0
        TX packets 3 bytes 252 (252.0 B)
        TX errors 0 dropped 5 overruns 0 carrier 0 collisions 0
 ​
 lo: flags=73<UP,LOOPBACK,RUNNING> mtu 65536
        inet 127.0.0.1 netmask 255.0.0.0
        inet6 ::1 prefixlen 128 scopeid 0x10<host>
        loop txqueuelen 1000 (Local Loopback)
        RX packets 534202 bytes 108091817 (103.0 MiB)
        RX errors 0 dropped 0 overruns 0 frame 0
        TX packets 534202 bytes 108091817 (103.0 MiB)
        TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0

6. Configure a docker registry mirror (and the insecure harbor registry)

 [root@node1 ~]# cat /etc/docker/daemon.json 
 {
 "insecure-registries":["192.168.21.34"],
 "registry-mirrors": ["https://s7s5jkzp.mirror.aliyuncs.com"]
 }
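
Changes to daemon.json take effect only after docker is restarted; docker info then shows whether the mirror and the insecure registry were picked up:

 systemctl restart docker
 docker info | grep -A 3 -E "Insecure Registries|Registry Mirrors"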

7. Private docker registry: harbor

Official site: https://goharbor.io/. This example runs harbor on the 192.168.21.34 host.
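
The manifests later in this article pull images such as 192.168.21.34/release/coredns:latest from this registry. A hedged sketch of populating it (the release project and the login credentials are assumptions made for illustration):

 docker login 192.168.21.34
 docker pull coredns/coredns:1.7.0
 docker tag coredns/coredns:1.7.0 192.168.21.34/release/coredns:latest
 docker push 192.168.21.34/release/coredns:latest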

VI. Deploying the Node services

1. Download the node binary package and copy the executables to /usr/bin on every Node

Download URL: https://dl.k8s.io/v1.19.0/kubernetes-node-linux-amd64.tar.gz
Extract kubernetes-node-linux-amd64.tar.gz and copy the executables from its bin directory to /usr/bin/ on every node.

2. Deploy the kubelet service

  • Create the kubelet systemd unit

 [root@node1 pki]# cat /usr/lib/systemd/system/kubelet.service 
 [Unit]
 Description=Kubernetes Kubelet Server
 Documentation=https://github.com/kubernetes/kubernetes
 After=docker.target
 ​
 [Service]
 EnvironmentFile=/etc/kubernetes/kubelet.conf
 ExecStart=/usr/bin/kubelet $KUBELET_ARGS
 Restart=always
 ​
 [Install]
 WantedBy=multi-user.target
  • Create the kubelet.conf configuration file (change --hostname-override to each node's own IP address)

 [root@node1 kubernetes]# cat kubelet.conf
 KUBELET_ARGS="--kubeconfig=/etc/kubernetes/kubeconfig \
 --config=/etc/kubernetes/kubelet.config \
 --hostname-override=192.168.21.31 \
 --network-plugin=flannel \
 --logtostderr=false --log-dir=/var/log/kubernetes --v=0 \
 --runtime-cgroups=/systemd/system.slice \
 --kubelet-cgroups=/systemd/system.slice"

 [root@node1 kubernetes]# cat kubelet.config
 kind: KubeletConfiguration
 apiVersion: kubelet.config.k8s.io/v1beta1
 address: 0.0.0.0
 port: 10250
 cgroupDriver: cgroupfs
 clusterDNS:
   - 172.16.0.100
 clusterDomain: cluster.local
 authentication:
   anonymous:
     enabled: true
  • Start kubelet.service and enable it at boot

 [root@node1 ~]# systemctl start kubelet.service && systemctl enable kubelet.service
 [root@node1 ~]# ps aux |grep kubelet
 root      4821  0.0  0.0 112716  2264 pts/0   S+   12:02   0:00 grep --color=auto kubelet
 root     21424  0.6  2.2 1168096 91636 ?       Ssl  10:59   0:24 /usr/bin/kubelet --kubeconfig=/etc/kubernetes/kubeconfig --config=/etc/kubernetes/kubelet.config --hostname-override=192.168.21.31 --logtostderr=false --log-dir=/var/log/kubernetes --v=0 --runtime-cgroups=/systemd/system.slice --kubelet-cgroups=/systemd/system.slice
  • Check the running process

 [root@node1 ~]# ps aux |grep kubelet
 root     17062  0.0  0.0 112716  2188 pts/0   S+   12:53   0:00 grep --color=auto kubelet
 root     21424  0.6  2.3 1168096 93684 ?       Ssl  10:59   0:43 /usr/bin/kubelet --kubeconfig=/etc/kubernetes/kubeconfig --config=/etc/kubernetes/kubelet.config --hostname-override=192.168.21.31 --logtostderr=false --log-dir=/var/log/kubernetes --v=0 --runtime-cgroups=/systemd/system.slice --kubelet-cgroups=/systemd/system.slice

3. Deploy the kube-proxy service

  • Create the kube-proxy systemd unit

 [root@node1 ~]# cat /usr/lib/systemd/system/kube-proxy.service 
 [Unit]
 Description=kubernetes kube-proxy Server
 Documentation=https://github.com/kubernetes/kubernetes
 After=network.target
 ​
 [Service]
 EnvironmentFile=/etc/kubernetes/kube-proxy.conf
 ExecStart=/usr/bin/kube-proxy $KUBE_PROXY_ARGS
 Restart=always
 ​
 [Install]
 WantedBy=multi-user.target
  • Create the kube-proxy configuration file (change --hostname-override to each node's own IP address)

 [root@node1 ~]# cat /etc/kubernetes/kube-proxy.conf
 KUBE_PROXY_ARGS="--kubeconfig /etc/kubernetes/kubeconfig \
 --hostname-override 192.168.21.31 \
 --proxy-mode iptables \
 --logtostderr=false \
 --log-dir /var/log/kubernetes \
 --v=0"
  • Start kube-proxy.service and enable it at boot

 [root@node3 ~]# systemctl enable kube-proxy.service 
 Created symlink from /etc/systemd/system/multi-user.target.wants/kube-proxy.service to /usr/lib/systemd/system/kube-proxy.service.
 [root@node3 ~]# systemctl start kube-proxy.service
  • Check the running process

 [root@node1 ~]# ps aux |grep kube
 root      3451  0.0  1.0 743152 41760 ?       Ssl  11:57   0:01 /usr/bin/kube-proxy --kubeconfig /etc/kubernetes/kubeconfig --hostname-override 192.168.21.31 --proxy-mode iptables --logtostderr=false --log-dir /var/log/kubernetes --v=0

VII. Deploying the CoreDNS service

1. Create the resource manifest

CoreDNS needs three resource objects: a ConfigMap, a Deployment, and a Service. Put all three in a single coredns.yaml file:

 apiVersion: v1
 kind: ConfigMap
 metadata:
   name: coredns
   namespace: kube-system
   labels:
     addonmanager.kubernetes.io/mode: EnsureExists
 data:
   Corefile: |
     cluster.local {
       errors
       health {
         lameduck 5s
       }
       ready
       kubernetes cluster.local 172.16.0.0/16 {
         fallthrough in-addr.arpa ip6.arpa
       }
       prometheus: 9153
       forward . /etc/resolv.conf
       cache 30
       loop
       reload
       loadbalance
     }
     . {
       cache 30
       loadbalance
       forward . /etc/resolv.conf
     }
 ---
 apiVersion: apps/v1
 kind: Deployment
 metadata:
   name: coredns
   namespace: kube-system
   labels:
     k8s-app: coredns
     kubernetes.io/name: "CoreDNS"
     kubernetes.io/cluster-service: "true"
 spec:
   replicas: 1
   selector:
     matchLabels:
       k8s-app: coredns
   template:
     metadata:
       labels:
         k8s-app: coredns
       annotations:
         scheduler.alpha.kubernetes.io/critical-pod: ''
         scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly","operator":"Exists"}]'
     spec:
       containers:
       - name: coredns
         image: 192.168.21.34/release/coredns:latest
         imagePullPolicy: IfNotPresent
         resources:
           limits:
             memory: 170Mi
           requests:
             cpu: 100m
             memory: 70Mi
         args: [ "-conf", "/etc/coredns/Corefile" ]
         volumeMounts:
         - name: config-volume
           mountPath: /etc/coredns
           readOnly: true
         ports:
         - containerPort: 53
           name: dns
           protocol: UDP
         - containerPort: 53
           name: dns-tcp
           protocol: TCP
         - containerPort: 9153
           name: metrics
           protocol: TCP
         securityContext:
           allowPrivilegeEscalation: false
           capabilities:
             add:
             - NET_BIND_SERVICE
             drop:
             - all
           readOnlyRootFilesystem: true
         livenessProbe:
           httpGet:
             path: /health
             port: 8080
             scheme: HTTP
           initialDelaySeconds: 60
           timeoutSeconds: 5
           successThreshold: 1
           failureThreshold: 5
         readinessProbe:
           httpGet:
             path: /ready
             port: 8181
             scheme: HTTP
       dnsPolicy: Default
       volumes:
       - name: config-volume
         configMap:
           name: coredns
           items:
           - key: Corefile
             path: Corefile
 ---
 apiVersion: v1
 kind: Service
 metadata:
   name: coredns
   namespace: kube-system
   annotations:
     prometheus.io/port: "9153"
     prometheus.io/scrape: "true"
   labels:
     k8s-app: coredns
     kubernetes.io/cluster-service: "true"
     kubernetes.io/name: "CoreDNS"
 spec:
   selector:
     k8s-app: coredns
   clusterIP: 172.16.0.100
   ports:
   - name: dns
     port: 53
     protocol: UDP
   - name: dns-tcp
     port: 53
     protocol: TCP
   - name: metrics
     port: 9153
     protocol: TCP

2. Create the CoreDNS resources

 [root@node1 coredns]# kubectl create -f coredns.yaml 
 configmap/coredns created
 deployment.apps/coredns created
 service/coredns created

Check the status of each resource:

 [root@node1 coredns]# kubectl get all --namespace=kube-system
 NAME                           READY   STATUS             RESTARTS   AGE
 pod/coredns-7bff699665-zfj5r   0/1     CrashLoopBackOff   6         10m
 ​
 NAME             TYPE       CLUSTER-IP     EXTERNAL-IP   PORT(S)                 AGE
 service/coredns   ClusterIP   172.16.0.100   <none>        53/UDP,53/TCP,9153/TCP   10m
 ​
 NAME                     READY   UP-TO-DATE   AVAILABLE   AGE
 deployment.apps/coredns   0/1     1            0           10m
 ​
 NAME                                 DESIRED   CURRENT   READY   AGE
 replicaset.apps/coredns-7bff699665   1         1         0       10m

Something is wrong: the pod and the deployment never become Ready. Check the pod logs first:

 [root@node1 coredns]# kubectl logs pod/coredns-7bff699665-zfj5r --namespace=kube-system
 /etc/coredns/Corefile:10 - Error during parsing: Unknown directive 'prometheus:'

The log shows an unrecognized directive (prometheus:), so comment out that line in coredns.yaml:

       ready
       kubernetes cluster.local 172.16.0.0/16 {
         fallthrough in-addr.arpa ip6.arpa
       }
       #prometheus: 9153
       forward . /etc/resolv.conf

Apply the manifest again:

 [root@node1 coredns]# kubectl apply -f coredns.yaml 
 configmap/coredns configured
 deployment.apps/coredns unchanged
 service/coredns unchanged

The status now looks healthy:

 [root@node1 coredns]# kubectl get all --namespace=kube-system
 NAME                           READY   STATUS   RESTARTS   AGE
 pod/coredns-7bff699665-zfj5r   1/1     Running   11         61m
 ​
 NAME             TYPE       CLUSTER-IP     EXTERNAL-IP   PORT(S)                 AGE
 service/coredns   ClusterIP   172.16.0.100   <none>        53/UDP,53/TCP,9153/TCP   61m
 ​
 NAME                     READY   UP-TO-DATE   AVAILABLE   AGE
 deployment.apps/coredns   1/1     1            1           61m
 ​
 NAME                                 DESIRED   CURRENT   READY   AGE
 replicaset.apps/coredns-7bff699665   1         1         1       61m

3. Verify CoreDNS

Create an nginx Pod and Service:

 [root@node1 k8s]# cat svc/coredns-test.yaml
 apiVersion: v1
 kind: Pod
 metadata:
   name: nginx
   labels:
     app: nginx
 spec:
   containers:
   - name: nginx
     image: 192.168.21.34/release/nginx:v1.21.1
     ports:
     - containerPort: 80
 ---
 apiVersion: v1
 kind: Service
 metadata:
   name: nginx
 spec:
   ports:
   - port: 80
     targetPort: 80
     protocol: TCP
   selector:
     app: nginx
 ​
 [root@node1 svc]# kubectl create -f coredns-test.yaml 
 pod/nginx created
 service/nginx created

Then create another Pod named myweb:

 [root@node1 pod]# cat nginx-pod-1.yaml
 apiVersion: v1
 kind: Pod
 metadata:
   name: myweb
 spec:
   containers:
   - name: web
     image: 192.168.21.34/release/nginx:v1.21.1
     imagePullPolicy: IfNotPresent

Exec into the myweb container and curl the nginx Service by name:

 [root@node1 pod]# kubectl exec -it myweb -- /bin/bash
 root@myweb:/# curl nginx
 <!DOCTYPE html>
 <html>
 <head>
 <title>Welcome to nginx!</title>
 <style>
     body {
         width: 35em;
        margin: 0 auto;
        font-family: Tahoma, Verdana, Arial, sans-serif;
    }
 </style>
 </head>
 <body>
 <h1>Welcome to nginx!</h1>
 <p>If you see this page, the nginx web server is successfully installed and
 working. Further configuration is required.</p>
 ​
 <p>For online documentation and support please refer to
 <a href="http://nginx.org/">nginx.org</a>.<br/>
 Commercial support is available at
 <a href="http://nginx.com/">nginx.com</a>.</p>
 ​
 <p><em>Thank you for using nginx.</em></p>
 </body>
 </html>
 root@myweb:/# exit
 exit

The Service is reachable by the name nginx. Use a busybox pod to test nslookup as well:

 [root@node1 pod]# cat busybox.yaml
 apiVersion: v1
 kind: Pod
 metadata:
   name: busybox
   namespace: default
 spec:
   containers:
   - name: busybox
     image: 192.168.21.34/release/busybox:latest
     command:
     - sleep
     - "3600"
 [root@node1 pod]# kubectl exec busybox -- nslookup nginx
 Server:   172.16.0.100
 Address:  172.16.0.100:53
 ​
 Name:     nginx.default.svc.cluster.local
 Address:  172.16.28.151

VIII. Verifying the cluster

1. On a master node, verify node information with kubectl

 kubectl --kubeconfig=/etc/kubernetes/kubeconfig get nodes
 NAME            STATUS   ROLES    AGE   VERSION
 192.168.21.31   Ready    <none>   24h   v1.19.0
 192.168.21.32   Ready    <none>   24h   v1.19.0
 192.168.21.33   Ready    <none>   24h   v1.19.0
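
Beyond node status, it is worth confirming that the system pods and the cluster endpoints look healthy, for example:

 kubectl --kubeconfig=/etc/kubernetes/kubeconfig get pods --all-namespaces -o wide
 kubectl --kubeconfig=/etc/kubernetes/kubeconfig cluster-info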

