
Host Environment Preparation

    Before formally installing the Kubernetes cluster, every host needs a round of environment preparation to lay the groundwork for the subsequent cluster installation.
Configure Time Synchronization
    If the nodes have direct Internet access, simply start the chronyd system service and enable it to start at boot.
[root@node1 ~]# yum install -y chrony
[root@node1 ~]# systemctl start chronyd.service
[root@node1 ~]# systemctl enable chronyd.service
Hostname Resolution
    To simplify the configuration steps, this test environment uses the hosts file for node name resolution; the file contents are as follows:
node1
    172.16.1.1 node1 node01.cce.com
    172.16.1.2 node2 node02.cce.com
    172.16.1.3 node3 node03.cce.com
    172.16.1.4 node4 node04.cce.com
node2
    172.16.1.1 node1 node01.cce.com
    172.16.1.2 node2 node02.cce.com
    172.16.1.3 node3 node03.cce.com
    172.16.1.4 node4 node04.cce.com
node3
    172.16.1.1 node1 node01.cce.com
    172.16.1.2 node2 node02.cce.com
    172.16.1.3 node3 node03.cce.com
    172.16.1.4 node4 node04.cce.com
node4
    172.16.1.1 node1 node01.cce.com
    172.16.1.2 node2 node02.cce.com
    172.16.1.3 node3 node03.cce.com
    172.16.1.4 node4 node04.cce.com
File Distribution Script
    With SSH trust already established between the hosts, the following script distributes a file to all nodes in bulk.
#!/usr/bin/env bash
#

nodes=(
node01
node02
node03
node04
node05
)

# Copy the file given as the first argument to the same path on every node
for item in "${nodes[@]}"
do
  scp "$1" "${item}:$1"
done
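# Example usage (added for illustration; the script name scp_dispatch.sh is an assumption, save it under any name you like):
# [root@node01 ~]# bash scp_dispatch.sh /etc/hosts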
Disable the iptables or firewalld Service
    On CentOS 7, usually only one of the iptables or firewalld services is installed and started. Without checking which one is actually active, simply stopping and disabling both is enough to reach the goal.
[root@node1 ~]# systemctl stop firewalld.service
[root@node1 ~]# systemctl stop iptables.service
[root@node1 ~]# systemctl disable firewalld.service
[root@node1 ~]# systemctl disable iptables.service
Disable Swap Devices
    When deploying the cluster, kubeadm by default checks in advance whether swap is disabled on the host and forcibly aborts the deployment if it is not. So, as long as the hosts have sufficient memory, disable all swap devices; otherwise extra options must be passed to the kubeadm init and kubeadm join commands later to ignore the check errors.
# Disabling swap takes two steps. First, turn off all currently enabled swap devices:
[root@node1 ~]# swapoff -a
# Then edit /etc/fstab and comment out every line that mounts a swap device.
[root@node1 ~]# sed -ri 's@(.*swap.*)@#\1@g' /etc/fstab
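# Optional check (added, not part of the original steps): confirm that no swap device remains active
[root@node1 ~]# swapon -s
[root@node1 ~]# free -m | grep -i swap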
Basic Kernel Parameter Tuning
[root@node1 ~]# cat >> /etc/sysctl.conf << EOF
# k8s
# Disabling IPv6 would keep rpcbind from starting, because NFS listens on IPv6, so these stay commented out
#net.ipv6.conf.all.disable_ipv6 = 1
#net.ipv6.conf.default.disable_ipv6 = 1
#net.ipv6.conf.lo.disable_ipv6 = 1

vm.swappiness=0
vm.overcommit_memory=1
vm.panic_on_oom=0
net.ipv4.neigh.default.gc_stale_time=120
net.netfilter.nf_conntrack_max=2310720
net.ipv4.ip_forward = 1
net.ipv4.tcp_tw_recycle=0

# see details in https://help.aliyun.com/knowledge_detail/39428.html
net.ipv4.conf.all.rp_filter=0
net.ipv4.conf.default.rp_filter=0
net.ipv4.conf.default.arp_announce = 2
net.ipv4.conf.lo.arp_announce=2
net.ipv4.conf.all.arp_announce=2

# fs
fs.inotify.max_user_instances=8192
fs.inotify.max_user_watches=1048576
fs.file-max=52706963
fs.nr_open=52706963

# see details in https://help.aliyun.com/knowledge_detail/41334.html
net.ipv4.tcp_max_tw_buckets = 5000
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 1024
net.ipv4.tcp_synack_retries = 2
kernel.sysrq = 1
# Allow binding to IPs that do not exist locally
net.ipv4.ip_nonlocal_bind = 1
net.ipv6.ip_nonlocal_bind = 1

# Allow IP forwarding
net.ipv4.ip_forward = 1
EOF
[root@node03 ~]# sysctl -p

Master Node Deployment

    Now we move on to the actual cluster deployment. The installation uses the Kubernetes v1.17.3 binaries, with etcd at version 3.3.10. Four hosts make up the cluster, two master nodes and two worker nodes, as detailed in the table below.
Node name IP address Role Components
node01.cce.com 172.16.1.1 Master etcd、kube-apiserver、kube-scheduler、kube-controller-manager、kubectl、ingress
node02.cce.com 172.16.1.2 Master etcd、kube-apiserver、kube-scheduler、kube-controller-manager、kubectl
node03.cce.com 172.16.1.3 Node etcd、kube-proxy、kubelet、flannel
node04.cce.com 172.16.1.4 Node kube-proxy、kubelet、flannel
Set Up the cfssl Certificate Signing Environment
    CFSSL is an open-source PKI/TLS toolkit from CloudFlare, written in Go. It includes a command-line tool and an HTTP API service for signing, verifying, and bundling TLS certificates.
    cfssl: the main tool for issuing certificates;
    cfssljson: converts the JSON certificates produced by cfssl into plain PEM files, i.e. it only changes the format;
    cfssl-certinfo: displays and verifies certificate information.
[root@node01 ~]# curl -L http://data.doorta.com/cfssl_linux-amd64 -o /usr/local/bin/cfssl
[root@node01 ~]# curl -L http://data.doorta.com/cfssljson_linux-amd64 -o /usr/local/bin/cfssljson
[root@node01 ~]# curl -L http://data.doorta.com/cfssl-certinfo_linux-amd64 -o /usr/local/bin/cfssl-certinfo
[root@node01 ~]# chmod +x /usr/local/bin/cfssl /usr/local/bin/cfssljson /usr/local/bin/cfssl-certinfo
# Create the directory that stores the CA certificate
[root@node01 ~]# mkdir -p /etc/kubernetes/certs/ca
# Create the CSR configuration for the self-signed CA
[root@node01 ~]# cat << EOF >> /etc/kubernetes/certs/ca/ca-csr.json
{
    "CN": "Kubernetes CA",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "Beijing",
            "ST": "Beijing"
        }
    ]
}
EOF
# Generate the CA certificate
[root@node01 ca]# cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
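# Optional check (added, not in the original steps): inspect the newly generated CA certificate with cfssl-certinfo
[root@node01 ca]# cfssl-certinfo -cert ca.pem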
Deploy a Three-Node etcd Cluster
    etcd is a distributed key/value store and service-discovery middleware that provides the data store for the Kubernetes cluster; just about all cluster state is kept in etcd, so it is critical and is normally deployed as a distributed cluster of at least three nodes. In my testing, Kubernetes releases below 1.17 do not yet support newer etcd releases, so version 3.3.10 is used below to build a three-node cluster that provides storage for Kubernetes.
    client certificate: used by the server to authenticate clients, for example etcdctl, etcd proxy, fleetctl, or the docker client;
    server certificate: used by the server side, and used by clients to verify the server's identity, for example the docker daemon or kube-apiserver;
    peer certificate: a dual-purpose certificate used for communication between etcd cluster members.
# Create the certificate directory
[root@node01 ~]# mkdir /etc/kubernetes/certs/etcd
# Create the CA signing profiles used for etcd mutual TLS
[root@node01 ~]# cat > /etc/kubernetes/certs/etcd/ca-config.json << EOF
{
  "signing": {
    "default": {
      "expiry": "438000h"
    },
    "profiles": {
      "server": {
        "expiry": "438000h",
        "usages": [
          "signing",
          "key encipherment",
          "server auth"
        ]
      },
      "client": {
        "expiry": "438000h",
        "usages": [
          "signing",
          "key encipherment",
          "client auth"
        ]
      },
      "peer": {
        "expiry": "438000h",
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ]
      }
    }
  }
}
EOF
# Create the etcd certificate signing request
[root@node01 ~]# cat > /etc/kubernetes/certs/etcd/etcd-peer-csr.json << EOF
{
    "CN": "etcd",
    "hosts": [
    "node01.cce.com",
    "node02.cce.com",
    "node03.cce.com",
    "172.16.1.1",
    "172.16.1.2",
    "172.16.1.3"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "Beijing",
            "ST": "Beijing"
        }
    ]
}
EOF
# Generate the etcd certificates
[root@node01 etcd]# cfssl gencert -ca=/etc/kubernetes/certs/ca/ca.pem -ca-key=/etc/kubernetes/certs/ca/ca-key.pem -config=ca-config.json -profile=peer etcd-peer-csr.json | cfssljson -bare etcd-peer
[root@node01 etcd]# ls
ca-config.json  etcd-peer.csr  etcd-peer-csr.json  etcd-peer-key.pem  etcd-peer.pem
# Distribute the certificates generated on node1 to node2 and node3
[root@node01 ~]# scp -r /etc/kubernetes/ node2:/etc
[root@node01 ~]# scp -r /etc/kubernetes/ node3:/etc
# Install the etcd binaries (run on node1, node2, and node3)
[root@node01 ~]# mkdir -p /usr/local/etcd/bin
[root@node01 ~]# tar xf etcd-v3.3.10-linux-amd64.tar.gz 
[root@node01 ~]# mv etcd-v3.3.10-linux-amd64/etcd etcd-v3.3.10-linux-amd64/etcdctl /usr/local/etcd/bin/
# Create the systemd unit file (the unit below is node1's; adjust --name and the IP addresses on each node)
[root@node01 ~]# cat << EOF > /usr/lib/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=notify
ExecStart=/usr/local/etcd/bin/etcd \
--name etcd1 \
--data-dir /data/etcd/data \
--listen-peer-urls https://172.16.1.1:2380 \
--listen-client-urls https://127.0.0.1:2379,https://172.16.1.1:2379 \
--quota-backend-bytes 8000000000 \
--initial-advertise-peer-urls https://172.16.1.1:2380 \
--advertise-client-urls https://172.16.1.1:2379,http://127.0.0.1:2379 \
--initial-cluster etcd1=https://172.16.1.1:2380,etcd2=https://172.16.1.2:2380,etcd3=https://172.16.1.3:2380 \
--log-package-levels=DEBUG \
--cert-file=/etc/kubernetes/certs/etcd/etcd-peer.pem \
--key-file=/etc/kubernetes/certs/etcd/etcd-peer-key.pem \
--peer-cert-file=/etc/kubernetes/certs/etcd/etcd-peer.pem \
--peer-key-file=/etc/kubernetes/certs/etcd/etcd-peer-key.pem \
--trusted-ca-file=/etc/kubernetes/certs/ca/ca.pem \
--peer-trusted-ca-file=/etc/kubernetes/certs/ca/ca.pem \
--peer-client-cert-auth \
--log-output=stdout \
--enable-v2=true

[Install]
WantedBy=multi-user.target
EOF
[root@node1 ~]# systemctl daemon-reload
[root@node1 ~]# systemctl start etcd.service
[root@node1 ~]# systemctl enable etcd.service
# List the cluster members
[root@node01 ~]# ETCDCTL_API=2 /usr/local/etcd/bin/etcdctl --endpoints="https://172.16.1.1:2379,https://172.16.1.2:2379,https://172.16.1.3:2379" --key-file=/etc/kubernetes/certs/etcd/etcd-peer-key.pem --cert-file=/etc/kubernetes/certs/etcd/etcd-peer.pem --ca-file=/etc/kubernetes/certs/ca/ca.pem member list
71ae8e01d16af95d: name=etcd3 peerURLs=https://172.16.1.3:2380 clientURLs=http://127.0.0.1:2379,https://172.16.1.3:2379 isLeader=false
7643de5934f96269: name=etcd2 peerURLs=https://172.16.1.2:2380 clientURLs=http://127.0.0.1:2379,https://172.16.1.2:2379 isLeader=false
a0e08efea825151f: name=etcd1 peerURLs=https://172.16.1.1:2380 clientURLs=http://127.0.0.1:2379,https://172.16.1.1:2379 isLeader=true
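# Optional check (added, not in the original steps): verify overall cluster health with the same client certificates
[root@node01 ~]# ETCDCTL_API=2 /usr/local/etcd/bin/etcdctl --endpoints="https://172.16.1.1:2379,https://172.16.1.2:2379,https://172.16.1.3:2379" --key-file=/etc/kubernetes/certs/etcd/etcd-peer-key.pem --cert-file=/etc/kubernetes/certs/etcd/etcd-peer.pem --ca-file=/etc/kubernetes/certs/ca/ca.pem cluster-health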
Deploy kube-apiserver
    kube-apiserver is an application that runs on the master nodes; it needs to be deployed on both node01 and node02.
# When there are many worker nodes, manually configuring TLS authentication for each of them is tedious; bootstrap-token authentication helps here. The prerequisite is enabling bootstrap token authentication on the apiserver (enable-bootstrap-token-auth); once a client's token matches a predefined token and authentication passes, a certificate is automatically issued to the node. Bootstrap tokens are a general mechanism that can be used in many scenarios. Start by generating the token.csv file:
[root@node01 ~]# mkdir -p /etc/kubernetes/{conf,certs/apiserver}
[root@node01 ~]# cat > /etc/kubernetes/conf/token.csv <<EOF
$(head -c 16 /dev/urandom | od -An -t x | tr -d ' '),kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF
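# Note (added for clarity): the token field is randomly generated, so record it; the same value must be passed to
# 'kubectl config set-credentials kubelet-bootstrap --token=...' later when building bootstrap.kubeconfig
# (the examples below use 5c98ab3b16c51ded9dd4f699029c923f).
[root@node01 ~]# cat /etc/kubernetes/conf/token.csv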
# Sign the ApiServer's client certificate. When the ApiServer talks to etcd it presents this certificate; in that exchange the etcd cluster is the server and the ApiServer is the client
[root@node01 ~]# cat > /etc/kubernetes/certs/apiserver/etcd-client-csr.json << EOF 
{
  "CN": "etcd-k8s-client",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "cce",
      "OU": "ops"
    }
  ]
}
EOF
# Issue the certificate the ApiServer uses as an etcd client
[root@node01 apiserver]# cfssl gencert -ca=/etc/kubernetes/certs/ca/ca.pem -ca-key=/etc/kubernetes/certs/ca/ca-key.pem -config=/etc/kubernetes/certs/etcd/ca-config.json -profile=client etcd-client-csr.json | cfssljson -bare etcd-client-csr
# Create the ApiServer's own serving certificate. The ApiServer itself works over TLS, so it needs its own certificate. Among the listed addresses, 127.0.0.1 is for local communication, the 172.16.x.x addresses cover the kube-apiserver service on every master node, and 10.10.0.1 is the ApiServer's address inside the cluster: some services, such as CoreDNS, need to call kube-apiserver from inside a Pod, so the host-level service is mapped into the cluster through a Service, and 10.10.0.1, the default ClusterIP of that kubernetes Service, must therefore be included when the kube-apiserver certificate is created.
[root@node01 ~]# cat > /etc/kubernetes/certs/apiserver/apiserver-csr.json << EOF 
{
  "CN": "kubernetes",
  "hosts": [
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local",
    "127.0.0.1",
    "172.16.1.1",
    "172.16.1.2",
    "10.10.0.1"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "k8s",
      "OU": "cloudteam"
    }
  ]
}
EOF
# Issue the certificate
[root@node01 apiserver]# cfssl gencert -ca=/etc/kubernetes/certs/ca/ca.pem -ca-key=/etc/kubernetes/certs/ca/ca-key.pem -config=/etc/kubernetes/certs/etcd/ca-config.json -profile=server apiserver-csr.json | cfssljson -bare apiserver
# Create the certificate used by metrics-server (the aggregation-layer proxy client)
[root@node01 ~]# mkdir /etc/kubernetes/certs/metrics-server/
[root@node01 ~]# cat > /etc/kubernetes/certs/metrics-server/metrics-server-csr.json << EOF 
{
  "CN": "aggregator",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF
# Issue the certificate
[root@node1 metrics-server]# cfssl gencert -ca=/etc/kubernetes/certs/ca/ca.pem -ca-key=/etc/kubernetes/certs/ca/ca-key.pem -config=/etc/kubernetes/certs/etcd/ca-config.json -profile=client metrics-server-csr.json | cfssljson -bare metrics-server
# Create the ApiServer installation directory
[root@node01 ~]# mkdir -p /usr/local/kube-apiserver/bin
[root@node01 ~]# mv kube-apiserver /usr/local/kube-apiserver/bin/
[root@node01 ~]# chmod +x /usr/local/kube-apiserver/bin/kube-apiserver
# Configure the audit policy
[root@node01 ~]# cat << EOF > /etc/kubernetes/conf/audit-policy.yaml
apiVersion: audit.k8s.io/v1beta1 # This is required.
kind: Policy
# Don't generate audit events for all requests in RequestReceived stage.
omitStages:
  - "RequestReceived"
rules:
  # Log pod changes at RequestResponse level
  - level: RequestResponse
    resources:
    - group: ""
      # Resource "pods" doesn't match requests to any subresource of pods,
      # which is consistent with the RBAC policy.
      resources: ["pods"]
  # Log "pods/log", "pods/status" at Metadata level
  - level: Metadata
    resources:
    - group: ""
      resources: ["pods/log", "pods/status"]

  # Don't log requests to a configmap called "controller-leader"
  - level: None
    resources:
    - group: ""
      resources: ["configmaps"]
      resourceNames: ["controller-leader"]

  # Don't log watch requests by the "system:kube-proxy" on endpoints or services
  - level: None
    users: ["system:kube-proxy"]
    verbs: ["watch"]
    resources:
    - group: "" # core API group
      resources: ["endpoints", "services"]

  # Don't log authenticated requests to certain non-resource URL paths.
  - level: None
    userGroups: ["system:authenticated"]
    nonResourceURLs:
    - "/api*" # Wildcard matching.
    - "/version"

  # Log the request body of configmap changes in kube-system.
  - level: Request
    resources:
    - group: "" # core API group
      resources: ["configmaps"]
    # This rule only applies to resources in the "kube-system" namespace.
    # The empty string "" can be used to select non-namespaced resources.
    namespaces: ["kube-system"]

  # Log configmap and secret changes in all other namespaces at the Metadata level.
  - level: Metadata
    resources:
    - group: "" # core API group
      resources: ["secrets", "configmaps"]

  # Log all other resources in core and extensions at the Request level.
  - level: Request
    resources:
    - group: "" # core API group
    - group: "extensions" # Version of group should NOT be included.

  # A catch-all rule to log all other requests at the Metadata level.
  - level: Metadata
    # Long-running requests like watches that fall under this rule will not
    # generate an audit event in RequestReceived.
    omitStages:
      - "RequestReceived"
EOF
# Create the apiserver systemd unit file
[root@node01 ~]# mkdir -p /data/kubernetes/kube-apiserver
[root@node01 ~]# cat << EOF > /usr/lib/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target
After=etcd.service

[Service]
ExecStart=/usr/local/kube-apiserver/bin/kube-apiserver \
            --apiserver-count=2 \
            --secure-port=6443 \
            --advertise-address=172.16.1.1 \
            --bind-address=172.16.1.1 \
            --token-auth-file=/etc/kubernetes/conf/token.csv \
            --kubelet-https=true \
            --enable-bootstrap-token-auth \
            --authorization-mode=Node,RBAC \
            --client-ca-file=/etc/kubernetes/certs/ca/ca.pem \
            --requestheader-allowed-names=aggregator \
            --requestheader-extra-headers-prefix=X-Remote-Extra- \
            --requestheader-group-headers=X-Remote-Group \
            --requestheader-username-headers=X-Remote-User \
            --proxy-client-cert-file=/etc/kubernetes/certs/metrics-server/metrics-server.pem \
            --proxy-client-key-file=/etc/kubernetes/certs/metrics-server/metrics-server-key.pem \
            --requestheader-client-ca-file=/etc/kubernetes/certs/ca/ca.pem \
            --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota \
            --allow-privileged=true \
            --runtime-config=api/all=true \
            --etcd-cafile=/etc/kubernetes/certs/ca/ca.pem \
            --etcd-certfile=/etc/kubernetes/certs/apiserver/etcd-client-csr.pem \
            --etcd-keyfile=/etc/kubernetes/certs/apiserver/etcd-client-csr-key.pem \
            --etcd-servers=https://172.16.1.1:2379,https://172.16.1.2:2379,https://172.16.1.3:2379 \
            --service-account-key-file=/etc/kubernetes/certs/ca/ca-key.pem \
            --service-cluster-ip-range=10.10.0.0/16 \
            --service-node-port-range=3000-29999 \
            --target-ram-mb=1024 \
            --enable-aggregator-routing=true \
            --kubelet-client-certificate=/etc/kubernetes/certs/apiserver/etcd-client-csr.pem \
            --kubelet-client-key=/etc/kubernetes/certs/apiserver/etcd-client-csr-key.pem \
            --log-dir=/data/kubernetes/kube-apiserver \
            --tls-cert-file=/etc/kubernetes/certs/apiserver/apiserver.pem \
            --tls-private-key-file=/etc/kubernetes/certs/apiserver/apiserver-key.pem \
            --audit-policy-file=/etc/kubernetes/conf/audit-policy.yaml \
            --audit-log-path=/data/kubernetes/kube-apiserver/kube-apiserver.audit \
            --audit-log-maxage=7 \
            --audit-log-maxbackup=4 \
            --logtostderr=false \
            --v=4
Restart=on-failure
Type=notify
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF
[root@node01 ~]# systemctl daemon-reload
[root@node01 ~]# systemctl start kube-apiserver.service
[root@node01 ~]# netstat -ntlp|grep 6443
tcp        0      0 172.16.1.1:6443         0.0.0.0:*               LISTEN      7415/kube-apiserver
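# Optional check (added, not in the original steps): /healthz should return ok when anonymous access to it is allowed,
# which the default RBAC bindings permit
[root@node01 ~]# curl -k https://172.16.1.1:6443/healthz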
Deploy nginx as a Reverse Proxy for kube-apiserver
    Because there are two master nodes and the ApiServer is a stateless service, it can be fronted by nginx or HAProxy to form a load-balanced, redundant entry point. Since the ApiServer does not work over plain HTTP, nginx's stream module is used for layer-4 proxying, as follows.
[root@node01 ~]# tar xf nginx-1.16.1.tar.gz
[root@node01 nginx-1.16.1]# ./configure --prefix=/usr/local/nginx --with-stream
[root@node01 nginx-1.16.1]# make && make install
# Append the stream configuration (with --prefix=/usr/local/nginx the configuration file is /usr/local/nginx/conf/nginx.conf)
[root@node01 ~]# cat >> /usr/local/nginx/conf/nginx.conf << EOF
stream {
    log_format ws "\$remote_addr \$upstream_addr \$time_local \$status";
    access_log /usr/local/nginx/logs/kubernetes.log ws;
    upstream backend {
        hash \$remote_addr consistent;
        server 172.16.1.1:6443        max_fails=3 fail_timeout=30s;
        server 172.16.1.2:6443        max_fails=3 fail_timeout=30s;
    }

    server {
        listen 172.16.1.3:8443;
        proxy_connect_timeout 1s;
        proxy_pass backend;
    }
}
EOF
[root@node01 ~]# cat << EOF > /usr/lib/systemd/system/nginx.service
[Unit]
Description=The NGINX HTTP and reverse proxy server
After=syslog.target network.target remote-fs.target nss-lookup.target

[Service]
Type=forking
PIDFile=/usr/local/nginx/logs/nginx.pid
ExecStartPre=/usr/local/nginx/sbin/nginx -t
ExecStart=/usr/local/nginx/sbin/nginx
ExecReload=/usr/local/nginx/sbin/nginx -s reload
ExecStop=/usr/bin/kill -s QUIT \$MAINPID
PrivateTmp=true

[Install]
WantedBy=multi-user.target
EOF
[root@node01 ~]# systemctl daemon-reload 
[root@node01 ~]# systemctl start nginx
[root@node01 ~]# systemctl enable nginx
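# Optional check (added, not in the original steps): requests through the stream proxy should reach the apiservers;
# use the listen address configured in the stream block above
[root@node01 ~]# curl -k https://172.16.1.3:8443/healthz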
Deploy kube-controller-manager
    kube-controller-manager, our controller manager for Pods, is also critically important. It only needs to communicate with the ApiServer and is a fairly single-purpose component, so its configuration is very simple, as follows.
# Create the kube-controller-manager installation directory
[root@node01 ~]# mkdir -p /usr/local/kube-controller-manager/bin
[root@node01 ~]# mv kube-controller-manager /usr/local/kube-controller-manager/bin/
[root@node01 ~]# chmod +x /usr/local/kube-controller-manager/bin/kube-controller-manager
# Provide the systemd unit file
[root@node01 ~]# mkdir -p /data/kubernetes/kube-controller-manager
[root@node01 ~]# cat << EOF > /usr/lib/systemd/system/kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
ExecStart=/usr/local/kube-controller-manager/bin/kube-controller-manager  \
        --cluster-cidr 172.20.0.0/16 \
        --leader-elect true \
        --address=127.0.0.1 \
        --cluster-name=kubernetes \
        --log-dir=/data/kubernetes/kube-controller-manager \
        --cluster-signing-cert-file=/etc/kubernetes/certs/ca/ca.pem \
        --cluster-signing-key-file=/etc/kubernetes/certs/ca/ca-key.pem \
        --horizontal-pod-autoscaler-use-rest-clients=true \
        --master=http://127.0.0.1:8080 \
        --experimental-cluster-signing-duration=438000h \
        --service-account-private-key-file=/etc/kubernetes/certs/ca/ca-key.pem \
        --service-cluster-ip-range=10.10.0.0/16 \
        --root-ca-file=/etc/kubernetes/certs/ca/ca.pem \
        --logtostderr=false \
        --v=4
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF
[root@node01 ~]# systemctl daemon-reload
[root@node01 ~]# systemctl start kube-controller-manager.service
[root@node01 ~]# systemctl enable kube-controller-manager.service
Deploy kube-scheduler
    kube-scheduler is the cluster's scheduler. Its main job is to pick, from all cluster nodes, those that can run a given Pod according to the scheduling algorithm, and then choose the best of them as the final result. kube-scheduler runs on the master nodes; its core function is to watch the ApiServer for Pods whose PodSpec.NodeName is empty, create a binding for each such Pod indicating which node it should be scheduled to, and write the scheduling result back to the ApiServer. It only needs to interact with the ApiServer, as follows.
# Create the kube-scheduler installation directory
[root@node01 ~]# mkdir -p /usr/local/kube-scheduler/bin
[root@node01 ~]# mv kube-scheduler /usr/local/kube-scheduler/bin/
[root@node01 ~]# chmod +x /usr/local/kube-scheduler/bin/kube-scheduler
# Provide the systemd unit file
[root@node01 ~]# mkdir -p /data/kubernetes/kube-scheduler 
[root@node01 ~]# cat << EOF > /usr/lib/systemd/system/kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler Plugin
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
ExecStart=/usr/local/kube-scheduler/bin/kube-scheduler \
        --leader-elect=true \
        --log-dir=/data/kubernetes/kube-scheduler \
        --master=http://127.0.0.1:8080 \
        --logtostderr=false \
        --v=4
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF
[root@node01 ~]# systemctl daemon-reload
[root@node01 ~]# systemctl start kube-scheduler.service
[root@node01 ~]# systemctl enable kube-scheduler.service
Check the Health of the Master Components
    This step mainly configures kubectl and checks whether all master components are running normally.
# Create the kubectl installation directory
[root@node01 ~]# mkdir -p /usr/local/kubectl/bin
[root@node01 ~]# mv kubectl /usr/local/kubectl/bin/
[root@node01 ~]# chmod +x /usr/local/kubectl/bin/kubectl
# Configure the PATH environment variable
[root@node01 ~]# echo 'export PATH=$PATH:/usr/local/kubectl/bin' >> /etc/profile
[root@node01 ~]# source /etc/profile
# Configure bash auto-completion
[root@node01 ~]# kubectl completion bash > /usr/share/bash-completion/completions/kubectl
[root@node01 ~]# source /usr/share/bash-completion/completions/kubectl
# Check component status
[root@node01 ~]# kubectl get cs
NAME                 STATUS    MESSAGE             ERROR
scheduler            Healthy   ok                  
controller-manager   Healthy   ok                  
etcd-1               Healthy   {"health":"true"}   
etcd-2               Healthy   {"health":"true"}   
etcd-0               Healthy   {"health":"true"}

Worker Node Deployment

    A worker node is a node onto which Pods are scheduled. It runs several main components. The Docker Engine provides the underlying runtime support for Pods;
    kubelet: the primary node agent running on every node. Each worker node starts a kubelet process that handles the tasks the master hands down to that node and manages Pods and their containers according to the PodSpec (a YAML or JSON object that describes a Pod);
    kube-proxy: runs on every worker node and forwards TCP/UDP traffic destined for a Service to the backend containers, load balancing across replicas when there are several. In Kubernetes a Service logically represents a group of backend Pods; external access to the Pods goes through the Service, and the requests the Service receives are actually handled by kube-proxy;
    Flannel: put simply, Flannel places Pods that otherwise could not communicate across hosts into one virtual network, so that all Pods in the cluster appear to share a single flat network and can talk to each other directly.
Deploy Docker
    Kubernetes certifies Docker versions that have been thoroughly validated; the highest certified version at the time is 17.03, while the latest docker-ce release is several version numbers ahead. Administrators may ignore the certification and use the newest docker-ce, or, following the note below, replace the install command to install version 17.03.
# Download the Docker YUM repository on each of the hosts
[root@node3 ~]# wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker.repo
# To install the docker 17.03 release currently certified by Kubernetes, replace the docker-ce install command below with the following:
# [root@node1 ~]# yum install -y --setopt=obsoletes=0 docker-ce-17.03.2.ce docker-ce-selinux-17.03.2.ce
# docker-ce-18.09.9
# Install bash completion, conntrack, and the Docker daemon
[root@node3 ~]# yum install -y bash-completion conntrack docker-ce
# If the error 'Requires: container-selinux >= 2:2.74' appears, add the following YUM repository
[root@node03 ~]# cat >> /etc/yum.repos.d/docker.repo << EOF

[epel-extras]
name=Extra Packages for Enterprise Linux 7 - \$basearch - Source
baseurl=https://mirrors.aliyun.com/centos/7/extras/\$basearch/
failovermethod=priority
enabled=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7
gpgcheck=0
EOF
# Enable auto-completion
[root@node03 ~]# source /usr/share/bash-completion/bash_completion
[root@node03 ~]# source /usr/share/bash-completion/completions/docker
# Create the data and configuration directories
[root@node03 ~]# mkdir -p /{data,etc}/docker
# Configure the Docker daemon. The bip here must correspond to the Flannel address pool configured later: Flannel takes a /16 network by default and splits it into /24 subnets, and each worker node's Docker bip should match that node's /24. For example, if node03 uses 172.20.1.1/24 then node04 uses 172.20.2.1/24, and so on.
[root@node03 ~]# cat > /etc/docker/daemon.json << EOF
{
    "graph": "/data/docker",
    "storage-driver": "overlay2",
    "log-driver":"json-file",
    "log-opts": {"max-size":"500m", "max-file":"3"},
    "registry-mirrors": ["https://owcyje1z.mirror.aliyuncs.com"],
    "bip": "172.20.1.1/24"
}
EOF
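# For reference (added, not an original step): node04 uses the next /24 from the same pool; only bip differs
[root@node04 ~]# cat > /etc/docker/daemon.json << EOF
{
    "graph": "/data/docker",
    "storage-driver": "overlay2",
    "log-driver":"json-file",
    "log-opts": {"max-size":"500m", "max-file":"3"},
    "registry-mirrors": ["https://owcyje1z.mirror.aliyuncs.com"],
    "bip": "172.20.2.1/24"
}
EOF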
# Start Docker
[root@node03 ~]# systemctl start docker.service
[root@node03 ~]# systemctl enable docker
Resolve the docker info Warnings
    Tune the kernel parameters Docker needs.
[root@node1 ~]# cat >> /etc/sysctl.conf << EOF
# k8s
#
# Enable iptables processing of bridged traffic (transparent bridging)
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-arptables = 1
EOF
[root@node1 ~]# sysctl -p
[root@node1 ~]# systemctl restart docker.service
Deploy kubelet
    This is a relatively complex service. To deploy kubelet we first need to issue certificates: the ApiServer actively connects to the kubelet to query node information, so the kubelet also needs a server certificate of its own. Since kubelet must be installed on every worker node, request the certificate for as many machines as practical up front; if a new machine later has no usable kubelet certificate, the certificate must be re-issued and all kubelet nodes must update their certificates as well.
    When kubelet starts a Pod it needs an infrastructure image to bring the Pod up. kubelet instructs the Docker engine to actually pull up the containers, and doing so requires a base image, a small sidecar-style container: once started, it lets kubelet control the Pod's environment, configuring things like the UTS, NET, and IPC namespaces, which this little container then holds on behalf of the Pod.
# On node01, create the bootstrap kubeconfig (the token must match the one generated in token.csv)
[root@node01 ~]# kubectl config set-cluster kubernetes-cluster --certificate-authority=/etc/kubernetes/certs/ca/ca.pem --embed-certs=true --server=https://172.16.1.1:7443 --kubeconfig=/etc/kubernetes/conf/bootstrap.kubeconfig
[root@node01 ~]# kubectl config set-credentials kubelet-bootstrap --token=5c98ab3b16c51ded9dd4f699029c923f --kubeconfig=/etc/kubernetes/conf/bootstrap.kubeconfig
[root@node01 ~]# kubectl config set-context default --cluster=kubernetes-cluster --user=kubelet-bootstrap --kubeconfig=/etc/kubernetes/conf/bootstrap.kubeconfig
[root@node01 ~]# kubectl config use-context default --kubeconfig=/etc/kubernetes/conf/bootstrap.kubeconfig
# Bind the user to the system:node-bootstrapper cluster role used for node bootstrapping
[root@node01 ~]# kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap
# Create the kubelet directories
[root@node03 ~]# mkdir -p /usr/local/kubelet/bin
[root@node03 ~]# mkdir -p /data/kubernetes/kubelet/{data,logs}
[root@node03 ~]# mv kubelet /usr/local/kubelet/bin/
[root@node03 ~]# chmod +x /usr/local/kubelet/bin/kubelet
# Create the kubelet systemd unit file
[root@node03 ~]# cat << EOF > /usr/lib/systemd/system/kubelet.service
[Unit]
Description=Kubernetes Kubelet Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=docker.service
Requires=docker.service


[Service]
ExecStart=/usr/local/kubelet/bin/kubelet \
        --cluster-dns=10.10.0.2  \
        --hostname-override=node03.cce.com \
        --kubeconfig=/etc/kubernetes/conf/kubelet.kubeconfig \
        --cert-dir=/etc/kubernetes/certs \
        --bootstrap-kubeconfig=/etc/kubernetes/conf/bootstrap.kubeconfig \
        --root-dir=/data/kubernetes/kubelet/data \
        --log-dir=/data/kubernetes/kubelet/logs \
        --logtostderr=false \
        --v=4 \
        --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.1
Restart=on-failure
LimitNOFILE=65536


[Install]
WantedBy=multi-user.target
EOF
[root@node03 ~]# systemctl daemon-reload
[root@node03 ~]# systemctl start kubelet.service
# On master node01, check whether node03's request to join the cluster has been received
[root@node01 ~]# kubectl get csr
NAME                                                   AGE    REQUESTOR           CONDITION
node-csr-UNHa4oECdJumIhaxl7zRZqzOYKDJ10zBxTKoGRW6zwU   103s   kubelet-bootstrap   Pending
# Approve the worker node's request to join the cluster
[root@node01 ~]# kubectl certificate approve node-csr-UNHa4oECdJumIhaxl7zRZqzOYKDJ10zBxTKoGRW6zwU
certificatesigningrequest.certificates.k8s.io/node-csr-UNHa4oECdJumIhaxl7zRZqzOYKDJ10zBxTKoGRW6zwU approved
# Taint the master nodes (optional, example only)
# [root@node01 ~]# kubectl taint node svc01.wangque.com node-role.kubernetes.io/master=:NoSchedule
# Label the worker nodes that have joined the cluster, then list them
[root@node01 ~]# kubectl label nodes  node03.cce.com node-role.kubernetes.io/worker=
node/node03.cce.com labeled
[root@node01 ~]# kubectl label nodes  node04.cce.com node-role.kubernetes.io/worker= 
node/node04.cce.com labeled
[root@node01 ~]# kubectl get nodes
NAME             STATUS   ROLES    AGE     VERSION
node03.cce.com   Ready    worker   26m     v1.17.3
node04.cce.com   Ready    worker   6m49s   v1.17.3
Deploy kube-proxy
    kube-proxy is mainly responsible for implementing Services: concretely, access from Pods to Services inside the cluster and access from node ports to Services from outside. A Service is an abstraction over a group of Pods, essentially a load balancer in front of them that distributes requests to the corresponding Pods; the Service is given an IP for this LB, usually called the cluster IP.
    kube-proxy manages the Service's Endpoints. The Service exposes a virtual IP, also called the Cluster IP; within the cluster, accessing Cluster IP:Port reaches the Pods behind the corresponding Service. Another important role of the Service is that the Pods behind it may be created and destroyed and change IPs over time; the Service provides a fixed IP for the service regardless of how the backend Endpoints change.
# Create the kube-proxy certificate directory; the certificate is signed by the same CA as the ApiServer, so kube-proxy can use it to talk to the ApiServer directly
[root@node01 ~]# mkdir /etc/kubernetes/certs/kube-proxy
# Create a certificate signing request for kube-proxy
[root@node01 ~]# cat > /etc/kubernetes/certs/kube-proxy/kube-proxy-csr.json <<EOF
{
  "CN": "system:kube-proxy",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "cce",
      "OU": "ops"
    }
  ]
}
EOF
# Sign the certificate with the same CA used by the ApiServer
[root@node01 kube-proxy]# cfssl gencert -ca=/etc/kubernetes/certs/ca/ca.pem -ca-key=/etc/kubernetes/certs/ca/ca-key.pem -config=/etc/kubernetes/certs/etcd/ca-config.json -profile=client /etc/kubernetes/certs/kube-proxy/kube-proxy-csr.json | cfssljson -bare kube-proxy-client
[root@node01 kube-proxy]# ls
kube-proxy-client.csr  kube-proxy-client-key.pem  kube-proxy-client.pem  kube-proxy-csr.json
# Turn the certificate into a kubeconfig
[root@node01 ~]# kubectl config set-cluster kubernetes-cluster --certificate-authority=/etc/kubernetes/certs/ca/ca.pem --embed-certs=true --server=https://172.16.1.1:7443 --kubeconfig=/etc/kubernetes/conf/kube-proxy.kubeconfig
[root@node01 ~]# kubectl config set-credentials kube-proxy --client-certificate=/etc/kubernetes/certs/kube-proxy/kube-proxy-client.pem --client-key=/etc/kubernetes/certs/kube-proxy/kube-proxy-client-key.pem --embed-certs=true --kubeconfig=/etc/kubernetes/conf/kube-proxy.kubeconfig
[root@node01 ~]# kubectl config set-context default --cluster=kubernetes-cluster --user=kube-proxy --kubeconfig=/etc/kubernetes/conf/kube-proxy.kubeconfig
[root@node01 ~]# kubectl config use-context default --kubeconfig=/etc/kubernetes/conf/kube-proxy.kubeconfig
# Because kube-proxy will run in ipvs mode, the ip_vs kernel modules must be loaded on this node. Create the module-loading script /etc/sysconfig/modules/ipvs.modules so the modules are loaded automatically:
[root@node03 ~]# cat >> /etc/sysconfig/modules/ipvs.modules << EOF
#!/bin/bash
#
ipvs_mods_dir="/usr/lib/modules/\$(uname -r)/kernel/net/netfilter/ipvs"
for mod in \$(ls \$ipvs_mods_dir | grep -o "^[^.]*"); do
    /sbin/modinfo -F filename \$mod &> /dev/null
    if [ \$? -eq 0 ]; then
        /sbin/modprobe \$mod
    fi
done
EOF
# Make the script executable and load the kernel modules by hand for the current boot
[root@node03 ~]# chmod +x /etc/sysconfig/modules/ipvs.modules
[root@node03 ~]# bash /etc/sysconfig/modules/ipvs.modules
# Install kube-proxy (also create the log directory referenced by the unit file below)
[root@node03 ~]# mkdir -p /usr/local/kube-proxy/bin /data/kubernetes/kube-proxy
[root@node03 ~]# mv kube-proxy /usr/local/kube-proxy/bin
# Configure the kube-proxy systemd unit; cluster-cidr is the /16 Pod address pool, and the default ipvs scheduler is rr
[root@node03 ~]# cat << EOF > /usr/lib/systemd/system/kube-proxy.service
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target

[Service]
ExecStart=/usr/local/kube-proxy/bin/kube-proxy \
        --bind-address=0.0.0.0 \
        --cluster-cidr=172.20.0.0/16 \
        --hostname-override=node03.cce.com \
        --proxy-mode=ipvs \
        --log-dir=/data/kubernetes/kube-proxy \
        --kubeconfig=/etc/kubernetes/conf/kube-proxy.kubeconfig \
        --logtostderr=false \
        --ipvs-scheduler=rr \
        --v=4
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF
[root@node03 ~]# systemctl daemon-reload
[root@node03 ~]# systemctl start kube-proxy.service 
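# Optional check (added, not in the original steps): once kube-proxy has synced Services, the ipvs virtual servers can be listed
[root@node03 ~]# yum install -y ipvsadm
[root@node03 ~]# ipvsadm -Ln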
Deploy the Flannel Network Plugin
    Flannel is a layer-3 network solution tailor-made for Kubernetes, mainly used to solve cross-host container communication. Its data is stored in etcd, so Flannel also needs etcd client certificates. One configuration item that deserves special attention is iface, which specifies which network interface is used for cluster traffic.
    In testing, the major cloud providers do not support Flannel's host-gw mode by default; on Alibaba Cloud it can be made to work through the VPC, or via the officially recommended approach, but in general, because of host-gw's peculiarities, the VXLAN mode is used in the cloud (an alternative VXLAN configuration is sketched right after the host-gw setup below).
# Write the /16 cluster-cidr and the backend type into etcd under /coreos.com/network/config ahead of time; when flannel starts it looks this up and configures itself accordingly
[root@node01 ~]# ETCDCTL_API=2 /usr/local/etcd/bin/etcdctl --endpoints="https://172.16.1.1:2379,https://172.16.1.2:2379,https://172.16.1.3:2379" --key-file=/etc/kubernetes/certs/etcd/etcd-peer-key.pem --cert-file=/etc/kubernetes/certs/etcd/etcd-peer.pem --ca-file=/etc/kubernetes/certs/ca/ca.pem set /coreos.com/network/config '{"Network": "172.20.0.0/16", "Backend": {"Type": "host-gw"}}'
[root@node01 ~]# ETCDCTL_API=2 /usr/local/etcd/bin/etcdctl --endpoints="https://172.16.1.1:2379,https://172.16.1.2:2379,https://172.16.1.3:2379" --key-file=/etc/kubernetes/certs/etcd/etcd-peer-key.pem --cert-file=/etc/kubernetes/certs/etcd/etcd-peer.pem --ca-file=/etc/kubernetes/certs/ca/ca.pem get /coreos.com/network/config 
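# Alternative (added for reference only, not part of this deployment): on cloud hosts where host-gw is unavailable,
# the same key can be written with the VXLAN backend instead
# [root@node01 ~]# ETCDCTL_API=2 /usr/local/etcd/bin/etcdctl --endpoints="https://172.16.1.1:2379,https://172.16.1.2:2379,https://172.16.1.3:2379" --key-file=/etc/kubernetes/certs/etcd/etcd-peer-key.pem --cert-file=/etc/kubernetes/certs/etcd/etcd-peer.pem --ca-file=/etc/kubernetes/certs/ca/ca.pem set /coreos.com/network/config '{"Network": "172.20.0.0/16", "Backend": {"Type": "vxlan"}}'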
# Create the Flannel installation directory
[root@node01 ~]# mkdir /usr/local/flannel
[root@node01 ~]# tar xf flannel-v0.11.0-linux-amd64.tar.gz -C /usr/local/flannel
# Write the planned /16 cluster-cidr and the current node's subnet into subnet.env for flannel to read (the values shown are node03's; adjust FLANNEL_SUBNET on each node)
[root@node01 ~]# cat > /etc/kubernetes/conf/subnet.env << EOF
FLANNEL_NETWORK=172.20.0.0/16
FLANNEL_SUBNET=172.20.1.1/24
FLANNEL_MTU=1500
FLANNEL_IPMASQ=false
EOF
[root@node03 ~]# cat << EOF > /usr/lib/systemd/system/flanneld.service
[Unit]
Description=Kubernetes Network Policy for Flannel
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target

[Service]
ExecStart=/usr/local/flannel/flanneld \
    --public-ip=172.16.1.3 \
    --etcd-endpoints=https://172.16.1.1:2379,https://172.16.1.2:2379,https://172.16.1.3:2379 \
    --etcd-keyfile=/etc/kubernetes/certs/etcd/etcd-peer-key.pem \
    --etcd-certfile=/etc/kubernetes/certs/etcd/etcd-peer.pem \
    --etcd-cafile=/etc/kubernetes/certs/ca/ca.pem \
    --iface=eth1 \
    --subnet-file=/etc/kubernetes/conf/subnet.env \
    --healthz-port=2401
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF
[root@node03 ~]# systemctl daemon-reload 
[root@node03 ~]# systemctl start flanneld.service
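# Optional check (added, not in the original steps): in host-gw mode each node installs routes to the other nodes' Pod subnets
[root@node03 ~]# ip route | grep 172.20.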
Test Network Connectivity
    At this point the Kubernetes cluster is largely in place. Next, a multi-replica Deployment is used to test connectivity between the worker nodes, which here are node03 and node04, mainly to check that Flannel works correctly: from node03, verify that Pods on node04 are reachable, and from node04, verify that Pods on node03 are reachable.
# Create a Deployment on the master node for testing
[root@node01 ~]# cat cce.yaml 
apiVersion: v1
kind: Namespace
metadata:
  name: cce
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: repod
  namespace: cce
  labels:
    app: relabel
spec:
  replicas: 3
  selector:
    matchLabels:
      app: web
  template:
    metadata:
      name: ikubernets
      namespace: cce
      labels:
        app: web
      annotations:
        author: caichangen
    spec:
      restartPolicy: Always
      containers:
      - name: repodbackend
        image: ikubernetes/myapp:v1
        imagePullPolicy: IfNotPresent
[root@node01 ~]# kubectl apply -f cce.yaml
# Check the running Pods
[root@node01 ~]# kubectl get pods -n cce -o wide 
NAME                     READY   STATUS    RESTARTS   AGE   IP           NODE             NOMINATED NODE   READINESS GATES
repod-7b455d5567-7lj9p   1/1     Running   0          53s   172.20.1.3   node03.cce.com   <none>           <none>
repod-7b455d5567-lgc8t   1/1     Running   0          53s   172.20.2.2   node04.cce.com   <none>           <none>
repod-7b455d5567-w5grv   1/1     Running   0          53s   172.20.1.2   node03.cce.com   <none>           <none>
# Test connectivity from node03
[root@node03 ~]# curl 172.20.1.2/hostname.html
repod-7b455d5567-w5grv
[root@node03 ~]# curl 172.20.2.2/hostname.html
repod-7b455d5567-lgc8t
# Test connectivity from node04
[root@node04 ~]# curl 172.20.1.2/hostname.html
repod-7b455d5567-w5grv
[root@node04 ~]# curl 172.20.2.2/hostname.html
repod-7b455d5567-lgc8t

Common Component Deployment

    The full Kubernetes stack involves many components; here two are used, CoreDNS and Ingress. CoreDNS provides the in-cluster domain name resolution, letting Kubernetes distinguish applications by private domain names, while Ingress is mainly used for forwarding HTTP and similar traffic, serving as the gateway in front of applications.
Deploy CoreDNS
    CoreDNS is essentially a DNS server, and DNS is a common service-discovery mechanism, so many open-source projects and engineers use CoreDNS for service discovery; Kubernetes uses it to solve service discovery inside the cluster. The CoreDNS service watches the Kubernetes API and creates a DNS record for every Service; Pods can then reach a resource simply by its Service name, without caring about changes to Pod IP addresses. The following deploys CoreDNS to provide name resolution for the cluster.
    Note that by default CoreDNS only resolves in-cluster names; external names are handled by forwarding, which by default goes to the DNS servers configured on the host, i.e. /etc/resolv.conf. So when setting up CoreDNS it is usual to point the host at a public DNS server, because most routers do not accept forwarded DNS requests: configure /etc/resolv.conf with 'nameserver 223.5.5.5', and if the file contains a search line, delete it, otherwise external domain names may fail to resolve.
# Create the configuration directory
[root@node01 ~]# mkdir /etc/kubernetes/coredns
# Download the template file
[root@node01 ~]# wget https://raw.githubusercontent.com/coredns/deployment/master/kubernetes/coredns.yaml.sed -O /etc/kubernetes/coredns/coredns.yaml.sed
# Download the deploy script
[root@node01 ~]# wget https://raw.githubusercontent.com/coredns/deployment/master/kubernetes/deploy.sh -O /etc/kubernetes/coredns/deploy.sh
# coredns.yaml.sed is the official template file. Run the script with the custom DNS IP to generate a CoreDNS manifest in YAML: -i is the CoreDNS Service IP, -r is the service-cluster-ip-range (the Service address pool), and -t is the template file
[root@node01 coredns]# bash deploy.sh -i "10.10.0.2" -r "10.10.0.0/16" -s -t coredns.yaml.sed > coredns.yaml
# Deploy CoreDNS
[root@node01 ~]# kubectl apply -f /etc/kubernetes/coredns/coredns.yaml
[root@node02 ~]# kubectl get pods -n kube-system                       
NAME                      READY   STATUS    RESTARTS   AGE
coredns-759df9d7b-vlqnx   1/1     Running   0          11s
# On a worker node, test CoreDNS by resolving the built-in cluster name directly against it
[root@node03 ~]# dig -t A kubernetes.default.svc.cluster.local. @10.10.0.2 +short
10.10.0.1
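# Optional check (added, not in the original steps): external names should also resolve through CoreDNS via the forwarding described above
[root@node03 ~]# dig -t A www.aliyun.com @10.10.0.2 +short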
Deploy Ingress
    An Ingress only defines traffic-forwarding and routing rules in a generic format; these have to be translated into the configuration of a specific application capable of HTTP forwarding and routing, such as Nginx, HAProxy, or Traefik, and the corresponding application then loads that configuration and performs the actual forwarding.
# Download the nginx Ingress controller manifest
[root@node01 ~]# wget https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.30.0/deploy/static/mandatory.yaml -O /etc/kubernetes/conf/ingress.yaml
# Because of network restrictions in China, switch the image registry to a mirror
[root@node01 ~]# grep 'image' /etc/kubernetes/conf/ingress.yaml
          image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.30.0
[root@node01 ~]# sed -i 's@quay.io@quay.mirrors.ustc.edu.cn@g' /etc/kubernetes/conf/ingress.yaml
[root@node01 ~]# grep 'quay' /etc/kubernetes/conf/ingress.yaml   
          image: quay.mirrors.ustc.edu.cn/kubernetes-ingress-controller/nginx-ingress-controller:0.30.0
# Before deploying, change the Ingress controller's network mode to hostNetwork and restrict it to run only on node04
[root@node01 ~]# cat /etc/kubernetes/conf/ingress.yaml
...
      hostNetwork: true
      nodeSelector:
        kubernetes.io/hostname: node04.cce.com
...
# Deploy the Ingress controller
[root@node01 ~]# kubectl apply -f /etc/kubernetes/conf/ingress.yaml
[root@node01 ~]# kubectl get all -n ingress-nginx 
NAME                                            READY   STATUS    RESTARTS   AGE
pod/nginx-ingress-controller-6ddf7d6bc4-gssdp   1/1     Running   0          91s

NAME                                       READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/nginx-ingress-controller   1/1     1            1           92s

NAME                                                  DESIRED   CURRENT   READY   AGE
replicaset.apps/nginx-ingress-controller-6ddf7d6bc4   1         1         1       92s

Test Cluster Health

    Next, an Ingress example is used to test the whole stack end to end, since serving it exercises the controller, Service, Flannel, and Ingress components together.
# Provide a test manifest
[root@node01 ~]# cat > test.yaml << EOF
# Create the Namespace
apiVersion: v1
kind: Namespace
metadata:
  name: ingress-web
---
# Create the Service; it mainly helps the Ingress select the backend Pods
apiVersion: v1
kind: Service
metadata:
  name: ingress-service
  namespace: ingress-web
spec:
  selector:
    application.name: nginx
  ports:
  - name: http
    port: 80
    targetPort: 80
---
# Create the backend Pod pool
apiVersion: apps/v1
kind: Deployment
metadata:
  name: web
  namespace: ingress-web
spec:
  replicas: 3
  selector:
    matchLabels:
      application.name: nginx
  template:
    metadata:
      name: backend
      namespace: ingress-web
      labels:
        application.name: nginx
    spec:
      containers:
      - name: backend-node
        image: ikubernetes/myapp:v1
        imagePullPolicy: IfNotPresent
---
# Create the Ingress resource; it is submitted to the Ingress Controller, which dynamically updates the configuration of the layer-7 proxy inside
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: web-ingress
  namespace: ingress-web
  annotations:
    nginx.ingress.kubernetes.io/rewrite-target: / # this annotation rewrites the request URL
    kubernetes.io/ingress.class: "nginx" # set the ingress class to nginx
spec:
  rules:
  - host: www.cce.com
    http:
      paths:
      - path: /
        backend:
          serviceName: ingress-service
          servicePort: 80
EOF
[root@node01 ~]# kubectl apply -f test.yaml
[root@node01 ~]# kubectl get pods -n ingress-web 
NAME                   READY   STATUS    RESTARTS   AGE
web-84dd646bcb-mhn94   1/1     Running   0          3m13s
web-84dd646bcb-x4qpc   1/1     Running   0          3m13s
web-84dd646bcb-zn2qk   1/1     Running   0          3m13s
# On any machine, add a hosts entry: the domain is www.cce.com as configured above, and the IP is node4's (the ingress-controller runs on node4)
[root@node01 ~]# grep node4 /etc/hosts
172.16.1.4 node4 node04 node04.cce.com www.cce.com
# Test access
[root@node01 ~]# curl http://www.cce.com/hostname.html
web-84dd646bcb-mhn94
[root@node01 ~]# curl http://www.cce.com/hostname.html
web-84dd646bcb-x4qpc
[root@node01 ~]# curl http://www.cce.com/hostname.html
web-84dd646bcb-zn2qk

Create a Super-Admin Account

    Create a super-administrator account mainly intended for external access, for example connecting to the cluster from the built-in shell of a MacBook; for that, a kubeconfig file for external connections needs to be created, as follows.
# Create the certificate
[root@node01 ~]# mkdir /etc/kubernetes/{certs,conf}/user
[root@node01 ~]# cat > /etc/kubernetes/certs/user/super-csr.json << EOF
{
    "CN": "super",
    "hosts": [],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "beijing",
            "L": "beijing",
            "O": "od",
            "OU": "ops"
        }
    ]
}
EOF
[root@node01 ~]# cd /etc/kubernetes/certs/user
[root@node01 ~]# cfssl gencert -ca=/etc/kubernetes/certs/ca/ca.pem -ca-key=/etc/kubernetes/certs/ca/ca-key.pem -config=/etc/kubernetes/certs/etcd/ca-config.json -profile=client super-csr.json| cfssljson -bare super
# Generate the cluster entry of the kubeconfig
[root@node01 ~]# kubectl config set-cluster kubernetes --certificate-authority=/etc/kubernetes/certs/ca/ca.pem --embed-certs=true --server=https://172.16.1.1:6443 --kubeconfig=/etc/kubernetes/conf/user/super.kubeconfig
# Set the user for this kubeconfig
[root@node01 ~]# kubectl config set-credentials super --client-certificate=/etc/kubernetes/certs/user/super.pem --client-key=/etc/kubernetes/certs/user/super-key.pem --embed-certs=true --kubeconfig=/etc/kubernetes/conf/user/super.kubeconfig
# Bind the user and the cluster to manage in a context
[root@node01 ~]# kubectl config set-context super-context --cluster=kubernetes --user=super --kubeconfig=/etc/kubernetes/conf/user/super.kubeconfig
# Set the default context
[root@node01 ~]# kubectl config use-context super-context --kubeconfig=/etc/kubernetes/conf/user/super.kubeconfig
# Bind the Kubernetes user to the cluster-admin role
[root@node01 ~]# cat > /etc/kubernetes/conf/user/super.yaml << EOF
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: super
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
  - apiGroup: rbac.authorization.k8s.io
    kind: User
    name: super
EOF
[root@node01 ~]# kubectl apply -f /etc/kubernetes/conf/user/super.yaml
# From now on the cluster can be accessed with the /etc/kubernetes/conf/user/super.kubeconfig file; note that by default kubectl looks for ${HOME}/.kube/config
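# Example usage (added for illustration; copy the file wherever convenient on the client machine):
# ~]$ scp node01:/etc/kubernetes/conf/user/super.kubeconfig ~/.kube/config
# or reference it explicitly without copying:
[root@node01 ~]# kubectl get nodes --kubeconfig=/etc/kubernetes/conf/user/super.kubeconfig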

kubelet Certificate Renewal

    By default, kube-controller-manager issues each node's kubelet a certificate valid for one year, so when the certificate is about to expire it needs to be renewed. The concrete steps are as follows:
1. Modify the kube-controller-manager configuration to add --experimental-cluster-signing-duration=438000h, so that certificates issued to kubelets are valid for 50 years (in practice only 25 years is granted), then restart kube-controller-manager; if --feature-gates=RotateKubeletServerCertificate=true is also added, the kubelet rotates to the new certificate automatically, although a restart is still required;
2. Delete the kubelet certificates and the related credential files:
        ~]# rm -f /etc/kubernetes/certs/kubelet*
        ~]# rm -f /etc/kubernetes/conf/kubelet*
3. Restart kubelet;
4. Check the validity period of the new certificate:
        ~]# openssl x509 -in /etc/kubernetes/certs/kubelet-client-current.pem -noout -text

Add a New etcd Node

    Adding an etcd node is actually quite simple: update the TLS certificates and restart etcd; the applications are not affected at all. The following demonstrates how to hot-scale the etcd cluster that backs Kubernetes.
# Modify the original certificate signing request file
[root@node1 ~]# cat /etc/kubernetes/certs/etcd/etcd-peer-csr.json 
{
    "CN": "etcd",
    "hosts": [
    "node1.cce.com",
    "node2.cce.com",
    "node3.cce.com",
    "node4.cce.com",  # 添加新主机的主机名
    "172.16.1.1",
    "172.16.1.2",
    "172.16.1.3",
    "172.16.1.4" # 添加新主机的IP
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "Beijing",
            "ST": "Beijing"
        }
    ]
}
[root@node1 ~]# cd /etc/kubernetes/certs/etcd
# Delete the old certificates
[root@node1 etcd]# ls
ca-config.json  etcd-peer.csr  etcd-peer-csr.json  etcd-peer-key.pem  etcd-peer.pem
[root@node1 etcd]# rm -f etcd-peer.* etcd-peer-key.pem
# Regenerate the certificates
[root@node1 etcd]# cfssl gencert -ca=/etc/kubernetes/certs/ca/ca.pem -ca-key=/etc/kubernetes/certs/ca/ca-key.pem -config=ca-config.json -profile=peer etcd-peer-csr.json | cfssljson -bare etcd-peer
# Distribute the certificates to the other nodes
[root@node1 etcd]# scp etcd-peer.* etcd-peer-key.pem node2:/etc/kubernetes/certs/etcd/
[root@node1 etcd]# scp etcd-peer.* etcd-peer-key.pem node3:/etc/kubernetes/certs/etcd/
# Restart etcd to confirm the new certificates are valid
[root@node1 ~]# systemctl restart etcd
[root@node2 ~]# systemctl restart etcd
[root@node3 ~]# systemctl restart etcd
# Check the cluster
[root@node1 ~]# ETCDCTL_API=2 /usr/local/etcd/bin/etcdctl --endpoints="https://172.16.1.1:2379,https://172.16.1.2:2379,https://172.16.1.3:2379" --key-file=/etc/kubernetes/certs/etcd/etcd-peer-key.pem --cert-file=/etc/kubernetes/certs/etcd/etcd-peer.pem --ca-file=/etc/kubernetes/certs/ca/ca.pem member list
71ae8e01d16af95d: name=etcd3 peerURLs=https://172.16.1.3:2380 clientURLs=http://127.0.0.1:2379,https://172.16.1.3:2379 isLeader=true
7643de5934f96269: name=etcd2 peerURLs=https://172.16.1.2:2380 clientURLs=http://127.0.0.1:2379,https://172.16.1.2:2379 isLeader=false
a0e08efea825151f: name=etcd1 peerURLs=https://172.16.1.1:2380 clientURLs=http://127.0.0.1:2379,https://172.16.1.1:2379 isLeader=false
[root@node1 ~]# ETCDCTL_API=2 /usr/local/etcd/bin/etcdctl --endpoints="https://172.16.1.1:2379,https://172.16.1.2:2379,https://172.16.1.3:2379" --key-file=/etc/kubernetes/certs/etcd/etcd-peer-key.pem --cert-file=/etc/kubernetes/certs/etcd/etcd-peer.pem --ca-file=/etc/kubernetes/certs/ca/ca.pem cluster-health
member 71ae8e01d16af95d is healthy: got healthy result from https://172.16.1.3:2379
member 7643de5934f96269 is healthy: got healthy result from https://172.16.1.2:2379
member a0e08efea825151f is healthy: got healthy result from https://172.16.1.1:2379
cluster is healthy  # the new certificates are in effect
# Install etcd on the new node
[root@node1 ~]# scp -r /usr/local/etcd node4:/usr/local/
# Modify the configuration on all existing nodes to pre-add the new etcd member (on every existing node)
[root@node1 ~]# cat /usr/lib/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=notify
ExecStart=/usr/local/etcd/bin/etcd \
--name etcd1 \
--data-dir /data/etcd/data \
--listen-peer-urls https://172.16.1.1:2380 \
--listen-client-urls https://127.0.0.1:2379,https://172.16.1.1:2379 \
--quota-backend-bytes 8000000000 --initial-advertise-peer-urls https://172.16.1.1:2380 \
--advertise-client-urls https://172.16.1.1:2379,http://127.0.0.1:2379 \
--initial-cluster etcd1=https://172.16.1.1:2380,etcd2=https://172.16.1.2:2380,etcd3=https://172.16.1.3:2380,etcd4=https://172.16.1.4:2380 \  # add the new node4 member
--log-package-levels=DEBUG \
--cert-file=/etc/kubernetes/certs/etcd/etcd-peer.pem \
--key-file=/etc/kubernetes/certs/etcd/etcd-peer-key.pem \
--peer-cert-file=/etc/kubernetes/certs/etcd/etcd-peer.pem \
--peer-key-file=/etc/kubernetes/certs/etcd/etcd-peer-key.pem \
--trusted-ca-file=/etc/kubernetes/certs/ca/ca.pem \
--peer-trusted-ca-file=/etc/kubernetes/certs/ca/ca.pem \
--peer-client-cert-auth \
--enable-v2=true \
--log-output=stdout

[Install]
WantedBy=multi-user.target
# Reload the unit files on all nodes
[root@node1 ~]# systemctl daemon-reload
# Create the certificate directory on the new node
[root@node4 ~]# mkdir -p /etc/kubernetes/certs
# Copy the certificates from an existing node to the new node
[root@node1 ~]# scp -r /etc/kubernetes/certs/ca node4:/etc/kubernetes/certs
[root@node1 ~]# scp -r /etc/kubernetes/certs/etcd node4:/etc/kubernetes/certs
# Add the new etcd member to the existing cluster
[root@node1 ~]# ETCDCTL_API=2 /usr/local/etcd/bin/etcdctl --endpoints="https://172.16.1.1:2379,https://172.16.1.2:2379,https://172.16.1.3:2379" --key-file=/etc/kubernetes/certs/etcd/etcd-peer-key.pem --cert-file=/etc/kubernetes/certs/etcd/etcd-peer.pem --ca-file=/etc/kubernetes/certs/ca/ca.pem member add etcd4 https://172.16.1.4:2380
Added member named etcd4 with ID 24f9b9c49eeaebc5 to cluster

ETCD_NAME="etcd4"
ETCD_INITIAL_CLUSTER="etcd4=https://172.16.1.4:2380,etcd3=https://172.16.1.3:2380,etcd2=https://172.16.1.2:2380,etcd1=https://172.16.1.1:2380"
ETCD_INITIAL_CLUSTER_STATE="existing"
# Create the systemd unit file on the new node
[root@node4 ~]# cat << EOF > /usr/lib/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=notify
ExecStart=/usr/local/etcd/bin/etcd \
--name etcd4 \
--data-dir /data/etcd/data \
--listen-peer-urls https://172.16.1.4:2380 \
--listen-client-urls https://127.0.0.1:2379,https://172.16.1.4:2379 \
--quota-backend-bytes 8000000000 \
--initial-advertise-peer-urls https://172.16.1.4:2380 \
--advertise-client-urls https://172.16.1.4:2379,http://127.0.0.1:2379 \
--initial-cluster etcd1=https://172.16.1.1:2380,etcd2=https://172.16.1.2:2380,etcd3=https://172.16.1.3:2380,etcd4=https://172.16.1.4:2380 \
--log-package-levels=DEBUG \
--cert-file=/etc/kubernetes/certs/etcd/etcd-peer.pem \
--key-file=/etc/kubernetes/certs/etcd/etcd-peer-key.pem \
--peer-cert-file=/etc/kubernetes/certs/etcd/etcd-peer.pem \
--peer-key-file=/etc/kubernetes/certs/etcd/etcd-peer-key.pem \
--trusted-ca-file=/etc/kubernetes/certs/ca/ca.pem \
--peer-trusted-ca-file=/etc/kubernetes/certs/ca/ca.pem \
--peer-client-cert-auth \
--log-output=stdout \
--enable-v2=true \
--initial-cluster-state=existing  # this flag must be added when joining an existing cluster

[Install]
WantedBy=multi-user.target
EOF
# Restart etcd on all nodes (the new node automatically syncs data from the existing members)
[root@node1 ~]# systemctl restart etcd.service
[root@node2 ~]# systemctl restart etcd.service
[root@node3 ~]# systemctl restart etcd.service
[root@node4 ~]# systemctl restart etcd.service
# Check that the cluster is healthy
[root@node1 ~]# ETCDCTL_API=2 /usr/local/etcd/bin/etcdctl --endpoints="https://172.16.1.1:2379,https://172.16.1.2:2379,https://172.16.1.3:2379,https://172.16.1.4:2379" --key-file=/etc/kubernetes/certs/etcd/etcd-peer-key.pem --cert-file=/etc/kubernetes/certs/etcd/etcd-peer.pem --ca-file=/etc/kubernetes/certs/ca/ca.pem member list 
24f9b9c49eeaebc5: name=etcd4 peerURLs=https://172.16.1.4:2380 clientURLs=http://127.0.0.1:2379,https://172.16.1.4:2379 isLeader=false # the new node has joined the cluster successfully
71ae8e01d16af95d: name=etcd3 peerURLs=https://172.16.1.3:2380 clientURLs=http://127.0.0.1:2379,https://172.16.1.3:2379 isLeader=false
7643de5934f96269: name=etcd2 peerURLs=https://172.16.1.2:2380 clientURLs=http://127.0.0.1:2379,https://172.16.1.2:2379 isLeader=false
a0e08efea825151f: name=etcd1 peerURLs=https://172.16.1.1:2380 clientURLs=http://127.0.0.1:2379,https://172.16.1.1:2379 isLeader=true
# After updating the etcd endpoints in the apiserver and flannel configuration, check that the Kubernetes cluster is still healthy
[root@node1 ~]# kubectl get cs
NAME                 STATUS    MESSAGE             ERROR
controller-manager   Healthy   ok                  
scheduler            Healthy   ok                  
etcd-2               Healthy   {"health":"true"}   
etcd-3               Healthy   {"health":"true"}   
etcd-0               Healthy   {"health":"true"}   
etcd-1               Healthy   {"health":"true"}

etcd Backup Script

    Attached below is a backup script for the etcd cluster.
[root@node01 ~]# cat /data/scripts/etcd_backup_script.sh 
#!/bin/bash
#
TIME=$(date +%s)
BACKUPDIR=/data/backup/etcd
# ENDPOINTS="https://192.168.0.110:2379,https://192.168.0.180:2379,https://192.168.0.58:2379"
ENDPOINTS="https://172.16.1.1:2379"
KEYFILE=/etc/kubernetes/certs/etcd/etcd-peer-key.pem
CERTFILE=/etc/kubernetes/certs/etcd/etcd-peer.pem
CACERT=/etc/kubernetes/certs/ca/ca.pem
export ETCDCTL_API=3

checkError() {
    if [ $? -ne 0 ];then
        echo "backup etcd data is failed , please check scripts "
        exit 1
    fi
}

backupHandler() {
    if [ ! -d ${BACKUPDIR}/${TIME} ];then
            mkdir -p ${BACKUPDIR}/${TIME}
    fi
    # ETCDCTL_API=3 /usr/local/etcd/bin/etcdctl --endpoints=${ENDPOINTS} --key=${KEYFILE} --cert=${CERTFILE} --cacert=${CACERT} snapshot save ${BACKUPDIR}/${TIME}/snapshot.db > /dev/null 2>&1
    /usr/local/etcd/bin/etcdctl --endpoints="${ENDPOINTS}" --key=${KEYFILE} --cert=${CERTFILE} --cacert=${CACERT} snapshot save ${BACKUPDIR}/${TIME}/snapshot.db > /dev/null 2>&1
    checkError
}

removeExpireFile() {
    find ${BACKUPDIR}/*.gz -ctime +7 -exec rm -f {} \; > /dev/null 2>&1
}

archiveFile() {
    cd ${BACKUPDIR} && tar czf ${TIME}.tar.gz ${TIME} --remove-files
}

if [ "$USER" = 'root' ];then
    removeExpireFile
    backupHandler
    checkError
    archiveFile
    checkError
    echo "backup etcd data is successfuly , backup file save path is ${BACKUPDIR} directory , filename is ${TIME}.tar.gz "
else
    echo "please switch to administrator operation !"
fi

# /usr/local/etcd/bin/etcdctl --endpoints="https://172.16.1.1:2379,https://172.16.1.2:2379,https://172.16.1.3:2379" --key=/etc/kubernetes/certs/etcd/etcd-peer-key.pem --cert=/etc/kubernetes/certs/etcd/etcd-peer.pem --cacert=/etc/kubernetes/certs/ca/ca.pem snapshot save snapshot.db
# etcd1=https://192.168.0.110:2379,etcd2=https://192.168.0.180:2379,etcd3=https://192.168.0.58:2379
# /usr/local/etcd/bin/etcdctl --endpoints="https://192.168.0.110:2379,https://192.168.0.180:2379,https://192.168.0.58:2379" --key=/etc/kubernetes/certs/etcd/etcd-peer-key.pem --cert=/etc/kubernetes/certs/etcd/etcd-peer.pem --cacert=/etc/kubernetes/certs/ca/ca.pem --initial-cluster etcd1=https://192.168.0.110:2380,etcd2=https://192.168.0.180:2380,etcd3=https://192.168.0.58:2380  --initial-advertise-peer-urls=https://192.168.0.110:2380 snapshot restore /root/snapshot.db --data-dir=/data/etcd/data_restore --name etcd1
# /usr/local/etcd/bin/etcdctl --endpoints="https://192.168.0.110:2379,https://192.168.0.180:2379,https://192.168.0.58:2379" --key=/etc/kubernetes/certs/etcd/etcd-peer-key.pem --cert=/etc/kubernetes/certs/etcd/etcd-peer.pem --cacert=/etc/kubernetes/certs/ca/ca.pem --initial-cluster etcd1=https://192.168.0.110:2380,etcd2=https://192.168.0.180:2380,etcd3=https://192.168.0.58:2380  --initial-advertise-peer-urls=https://192.168.0.180:2380 snapshot restore /root/snapshot.db --data-dir=//data/etcd/data_restore --name etcd2
# /usr/local/etcd/bin/etcdctl --endpoints="https://192.168.0.110:2379,https://192.168.0.180:2379,https://192.168.0.58:2379" --key=/etc/kubernetes/certs/etcd/etcd-peer-key.pem --cert=/etc/kubernetes/certs/etcd/etcd-peer.pem --cacert=/etc/kubernetes/certs/ca/ca.pem --initial-cluster etcd1=https://192.168.0.110:2380,etcd2=https://192.168.0.180:2380,etcd3=https://192.168.0.58:2380  --initial-advertise-peer-urls=https://192.168.0.58:2380 snapshot restore /root/snapshot.db --data-dir=/data/etcd/data_restore --name etcd3
