Installing K8s and Configuring the Master Nodes (K8s Study Notes 5)


Installing K8s

Install K8s on the two hosts sh0-30 and sh0-31.

Download

cd /opt/src
wget https://dl.k8s.io/v1.20.4/kubernetes-server-linux-amd64.tar.gz

A proxy may be required to reach dl.k8s.io.

Extract to the target directory and set up a versioned symlink:

tar xf kubernetes-server-linux-amd64.tar.gz -C /opt/
cd ..
mv kubernetes/ kubernetes-v1.20.4
ln -s kubernetes-v1.20.4/ kubernetes

Remove the bundled source tarball:

cd kubernetes
rm -rf kubernetes-src.tar.gz
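
A quick sanity check that the extraction and the symlink are in place (a sketch; paths follow the layout created above):

/opt/kubernetes/server/bin/kube-apiserver --version
ls -ld /opt/kubernetes

The first command should print Kubernetes v1.20.4.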

Issue certificates on the ops host

On sh0-50:

cd /opt/certs/
vim /opt/certs/client-csr.json

Contents:

{
    "CN": "k8s-node",
    "hosts": [
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "shenzhen",
            "L": "shenzhen",
            "O": "gy",
            "OU": "ops"
        }
    ]
}

Issue the client certificate:

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=client client-csr.json |cfssl-json -bare client


-rw-r--r--. 1 root root  836 Feb 23 09:14 ca-config.json
-rw-r--r--. 1 root root  997 Feb 21 05:53 ca.csr
-rw-r--r--. 1 root root  330 Feb 21 05:47 ca-csr.json
-rw-------. 1 root root 1679 Feb 21 05:53 ca-key.pem
-rw-r--r--. 1 root root 1346 Feb 21 05:53 ca.pem
-rw-r--r--. 1 root root  997 Mar  3 10:45 client.csr
-rw-r--r--. 1 root root  282 Mar  3 10:45 client-csr.json
-rw-------. 1 root root 1675 Mar  3 10:45 client-key.pem
-rw-r--r--. 1 root root 1367 Mar  3 10:45 client.pem
-rw-r--r--. 1 root root 1066 Feb 23 09:27 etcd-peer.csr
-rw-r--r--. 1 root root  377 Feb 23 09:23 etcd-peer-csr.json
-rw-------. 1 root root 1675 Feb 23 09:27 etcd-peer-key.pem
-rw-r--r--. 1 root root 1432 Feb 23 09:27 etcd-peer.pem
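
Optionally, verify the subject and validity period of the freshly issued client certificate with openssl (a quick check, not part of the original flow):

openssl x509 -in /opt/certs/client.pem -noout -subject -dates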

Next, the apiserver certificate:

vim /opt/certs/apiserver-csr.json

Contents (192.168.0.43 is the VIP that the keepalived setup below will manage; JSON does not allow comments, so keep annotations out of the file):

{
    "CN": "k8s-apiserver",
    "hosts": [
        "127.0.0.1",
        "192.168.0.1",
        "kubernetes.default",
        "kubernetes.default.svc",
        "kubernetes.default.svc.cluster",
        "kubernetes.default.svc.cluster.local",
        "192.168.0.43",  // 作为VIP
        "192.168.0.30",
        "192.168.0.31",
        "192.168.0.32"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "shenzhen",
            "L": "shenzhen",
            "O": "gy",
            "OU": "ops"
        }
    ]
}

Issue the apiserver certificate:

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=server apiserver-csr.json |cfssl-json -bare apiserver


-rw-r--r--. 1 root root 1253 Mar  3 10:49 apiserver.csr
-rw-r--r--. 1 root root  552 Mar  3 10:49 apiserver-csr.json
-rw-------. 1 root root 1679 Mar  3 10:49 apiserver-key.pem
-rw-r--r--. 1 root root 1602 Mar  3 10:49 apiserver.pem
-rw-r--r--. 1 root root  836 Feb 23 09:14 ca-config.json
-rw-r--r--. 1 root root  997 Feb 21 05:53 ca.csr
-rw-r--r--. 1 root root  330 Feb 21 05:47 ca-csr.json
-rw-------. 1 root root 1679 Feb 21 05:53 ca-key.pem
-rw-r--r--. 1 root root 1346 Feb 21 05:53 ca.pem
-rw-r--r--. 1 root root  997 Mar  3 10:45 client.csr
-rw-r--r--. 1 root root  282 Mar  3 10:45 client-csr.json
-rw-------. 1 root root 1675 Mar  3 10:45 client-key.pem
-rw-r--r--. 1 root root 1367 Mar  3 10:45 client.pem
-rw-r--r--. 1 root root 1066 Feb 23 09:27 etcd-peer.csr
-rw-r--r--. 1 root root  377 Feb 23 09:23 etcd-peer-csr.json
-rw-------. 1 root root 1675 Feb 23 09:27 etcd-peer-key.pem
-rw-r--r--. 1 root root 1432 Feb 23 09:27 etcd-peer.pem
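
Since clients will reach the apiserver through the VIP, it is worth confirming that 192.168.0.43 and the other addresses all made it into the certificate's Subject Alternative Name list; a quick check with openssl:

openssl x509 -in /opt/certs/apiserver.pem -noout -text | grep -A1 "Subject Alternative Name"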

Distribute the certificates

for i in 30 31;do echo sh0-$i;ssh sh0-$i "mkdir /opt/kubernetes/server/bin/certs";scp apiserver-key.pem apiserver.pem ca-key.pem ca.pem client-key.pem client.pem sh0-$i:/opt/kubernetes/server/bin/certs/;done
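
To confirm the distribution, list the copied files on each master (assuming the same passwordless SSH used by the loop above):

for i in 30 31;do echo sh0-$i;ssh sh0-$i "ls /opt/kubernetes/server/bin/certs/";done

Each host should show the six .pem files copied above.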

ApiServer Configuration

On sh0-30 and sh0-31:

cd /opt/kubernetes/server/bin
mkdir conf
cd conf
vim audit.yaml

Use :set paste in vim before pasting to keep the indentation intact.

Contents:

apiVersion: audit.k8s.io/v1beta1 # This is required.
kind: Policy
# Don't generate audit events for all requests in RequestReceived stage.
omitStages:
  - "RequestReceived"
rules:
  # Log pod changes at RequestResponse level
  - level: RequestResponse
    resources:
    - group: ""
      # Resource "pods" doesn't match requests to any subresource of pods,
      # which is consistent with the RBAC policy.
      resources: ["pods"]
  # Log "pods/log", "pods/status" at Metadata level
  - level: Metadata
    resources:
    - group: ""
      resources: ["pods/log", "pods/status"]

  # Don't log requests to a configmap called "controller-leader"
  - level: None
    resources:
    - group: ""
      resources: ["configmaps"]
      resourceNames: ["controller-leader"]

  # Don't log watch requests by the "system:kube-proxy" on endpoints or services
  - level: None
    users: ["system:kube-proxy"]
    verbs: ["watch"]
    resources:
    - group: "" # core API group
      resources: ["endpoints", "services"]

  # Don't log authenticated requests to certain non-resource URL paths.
  - level: None
    userGroups: ["system:authenticated"]
    nonResourceURLs:
    - "/api*" # Wildcard matching.
    - "/version"

  # Log the request body of configmap changes in kube-system.
  - level: Request
    resources:
    - group: "" # core API group
      resources: ["configmaps"]
    # This rule only applies to resources in the "kube-system" namespace.
    # The empty string "" can be used to select non-namespaced resources.
    namespaces: ["kube-system"]

  # Log configmap and secret changes in all other namespaces at the Metadata level.
  - level: Metadata
    resources:
    - group: "" # core API group
      resources: ["secrets", "configmaps"]

  # Log all other resources in core and extensions at the Request level.
  - level: Request
    resources:
    - group: "" # core API group
    - group: "extensions" # Version of group should NOT be included.

  # A catch-all rule to log all other requests at the Metadata level.
  - level: Metadata
    # Long-running requests like watches that fall under this rule will not
    # generate an audit event in RequestReceived.
    omitStages:
      - "RequestReceived"

Configure the startup script

vim /opt/kubernetes/server/bin/kube-apiserver-startup.sh

#!/bin/bash

WORK_DIR=$(dirname $(readlink -f $0))
[ $? -eq 0 ] && cd $WORK_DIR || exit

/opt/kubernetes/server/bin/kube-apiserver \
    --apiserver-count 2 \
    --audit-log-path /data/logs/kubernetes/kube-apiserver/audit-log \
    --audit-policy-file ./conf/audit.yaml \
    --authorization-mode RBAC \
    --client-ca-file ./certs/ca.pem \
    --requestheader-client-ca-file ./certs/ca.pem \
    --enable-admission-plugins NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota \
    --etcd-cafile ./certs/ca.pem \
    --etcd-certfile ./certs/client.pem \
    --etcd-keyfile ./certs/client-key.pem \
    --etcd-servers https://192.168.0.21:2379,https://192.168.0.30:2379,https://192.168.0.31:2379 \
    --service-account-key-file ./certs/ca-key.pem \
    --service-cluster-ip-range 192.168.0.0/16 \
    --service-node-port-range 3000-29999 \
    --target-ram-mb=1024 \
    --kubelet-client-certificate ./certs/client.pem \
    --kubelet-client-key ./certs/client-key.pem \
    --log-dir  /data/logs/kubernetes/kube-apiserver \
    --tls-cert-file ./certs/apiserver.pem \
    --tls-private-key-file ./certs/apiserver-key.pem \
    --service-account-issuer linty \
    --service-account-signing-key-file ./certs/ca-key.pem \
    --api-audiences linty \
    --v 2

Make the script executable:

chmod u+x /opt/kubernetes/server/bin/kube-apiserver-startup.sh
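
The script resolves ./certs and ./conf relative to its own directory, so a quick check that both directories are populated before wiring up supervisord:

cd /opt/kubernetes/server/bin && ls certs conf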

Auto-start with supervisord

vim /etc/supervisord.d/kube-apiserver.ini

Contents (shown for sh0-30; on sh0-31 name the program kube-apiserver-0-31 accordingly):
[program:kube-apiserver-0-30]
command=/opt/kubernetes/server/bin/kube-apiserver-startup.sh
numprocs=1
directory=/opt/kubernetes/server/bin
autostart=true
autorestart=true
startsecs=30
startretries=3
exitcodes=0,2
stopsignal=QUIT
stopwaitsecs=10
user=root
redirect_stderr=true
stdout_logfile=/data/logs/kubernetes/kube-apiserver/apiserver.stdout.log
stdout_logfile_maxbytes=64MB
stdout_logfile_backups=5
stdout_capture_maxbytes=1MB
stdout_events_enabled=false

Create the log directory and reload supervisord:

mkdir -p /data/logs/kubernetes/kube-apiserver/
supervisorctl update
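
Once supervisord has reloaded, the apiserver should be RUNNING and listening on its default secure port 6443, and the audit log configured above should start filling as requests arrive. A minimal check (program name as defined in the ini; adjust per host):

supervisorctl status kube-apiserver-0-30
ss -lnt | grep 6443
tail -n 3 /data/logs/kubernetes/kube-apiserver/audit-log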

L4 Proxy

For background on L4 (layer 4) load balancing, see https://zhuanlan.zhihu.com/p/53438208

On sh0-20 and sh0-21:

yum install -y nginx
vim /etc/nginx/nginx.conf

Append at the end of the file (the stream block must sit outside the http block):

stream {
    log_format proxy '$time_local|$remote_addr|$upstream_addr|$protocol|$status|'
                     '$session_time|$upstream_connect_time|$bytes_sent|$bytes_received|'
                     '$upstream_bytes_sent|$upstream_bytes_received' ;

    upstream kube-apiserver {
        server 192.168.0.30:6443     max_fails=3 fail_timeout=30s;
        server 192.168.0.31:6443     max_fails=3 fail_timeout=30s;
    }
    server {
        listen 7443;
        proxy_connect_timeout 2s;
        proxy_timeout 900s;
        proxy_pass kube-apiserver;
        access_log /var/log/nginx/proxy.log proxy;
    }
}

Enable and start nginx:

systemctl enable nginx
systemctl start nginx
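
Before relying on the proxy, validate the configuration and make sure the stream listener answers (a sketch; the curl should reach the apiserver's TLS endpoint through the proxy, with the exact response depending on the anonymous-access policy):

nginx -t
ss -lnt | grep 7443
curl -k https://127.0.0.1:7443/healthz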

Keepalived

Install Keepalived and add a port-check script:

yum install -y keepalived
vim /etc/keepalived/check_port.sh
#!/bin/bash
CHK_PORT=$1
if [ -n "$CHK_PORT" ]; then
    PORT_PROCESS=`ss -lnt | grep $CHK_PORT | wc -l`
    if [ $PORT_PROCESS -eq 0 ]; then
        echo "Port $CHK_PORT Is Not Used, End."
        exit 1
    fi
else
    echo "Check Port Cant Be Empty!"
fi

chmod +x /etc/keepalived/check_port.sh
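
The script exits 1 when nothing is listening on the given port, which is what the vrrp_script below relies on to lower the node's priority. A quick manual test while nginx is running:

/etc/keepalived/check_port.sh 7443; echo $?

This should print 0; stop nginx and it exits 1 instead.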

On sh0-20:

vim /etc/keepalived/keepalived.conf

! Configuration File for keepalived

global_defs {
    router_id 192.168.0.20
}

vrrp_script chk_nginx {
    script "/etc/keepalived/check_port.sh 7443"
    interval 2
    weight -20
}

vrrp_instance VI_1 {
    state BACKUP
    interface ens33
    virtual_router_id 251
    priority 100
    advert_int 1
    mcast_src_ip 192.168.0.20
    nopreempt

    authentication {
      auth_type PASS
      auth_pass 11111111
    }
    track_script {
      chk_nginx
    }
    virtual_ipaddress {
      192.168.0.43
    }
}

On sh0-21:

vim /etc/keepalived/keepalived.conf

! Configuration File for keepalived

global_defs {
    router_id 192.168.0.21
}

vrrp_script chk_nginx {
    script "/etc/keepalived/check_port.sh 7443"
    interval 2
    weight -20
}

vrrp_instance VI_1 {
    state BACKUP
    interface ens33
    virtual_router_id 251
    priority 90
    advert_int 1
    mcast_src_ip 192.168.0.21

    authentication {
      auth_type PASS
      auth_pass 11111111
    }
    track_script {
      chk_nginx
    }
    virtual_ipaddress {
      192.168.0.43
    }
}

Enable and start Keepalived on both hosts:

systemctl enable keepalived
systemctl start keepalived

Now ip add shows the VIP (here on sh0-20, which currently holds it):

[root@sh0-20 ~]# ip add
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP group default qlen 1000
    link/ether 00:0c:29:69:d1:d4 brd ff:ff:ff:ff:ff:ff
    inet 192.168.0.20/24 brd 192.168.0.255 scope global noprefixroute ens33
       valid_lft forever preferred_lft forever
    inet 192.168.0.43/32 scope global ens33
       valid_lft forever preferred_lft forever
    inet6 fe80::9c78:5264:a1f8:f376/64 scope link noprefixroute
       valid_lft forever preferred_lft forever
3: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default
    link/ether 02:42:f1:97:0f:08 brd ff:ff:ff:ff:ff:ff
    inet 172.7.20.1/24 brd 172.7.20.255 scope global docker0
       valid_lft forever preferred_lft forever

Normally, when the service on the master dies, the backup takes over as master; but once the master's service recovers, it preempts the VIP again, causing a second failover, which is bad for a busy site. So we add nopreempt (non-preemption) to the configuration. This parameter only works when state is BACKUP, so when running HA it is best to set both master and backup to state BACKUP and let them compete by priority.
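A simple failover drill to confirm this behavior (a sketch; run each command on the host named in its comment):

# on sh0-20: stop nginx so check_port.sh fails and the VRRP priority drops
systemctl stop nginx
# on sh0-21: within a few seconds the VIP should move here
ip addr show ens33 | grep 192.168.0.43
# on sh0-20: bring nginx back; with nopreempt the VIP stays on sh0-21
systemctl start nginx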

Kube Controller Manager

On sh0-30 and sh0-31:

vim /opt/kubernetes/server/bin/kube-controller-manager-startup.sh

#!/bin/sh
WORK_DIR=$(dirname $(readlink -f $0))
[ $? -eq 0 ] && cd $WORK_DIR || exit

/opt/kubernetes/server/bin/kube-controller-manager \
    --cluster-cidr 172.7.0.0/16 \
    --log-dir /data/logs/kubernetes/kube-controller-manager \
    --master http://127.0.0.1:8080 \
    --service-account-private-key-file ./certs/ca-key.pem \
    --service-cluster-ip-range 192.168.0.0/16 \
    --root-ca-file ./certs/ca.pem \
    --v 2

Make the script executable and create the log directory:

chmod u+x /opt/kubernetes/server/bin/kube-controller-manager-startup.sh
mkdir -p /data/logs/kubernetes/kube-controller-manager

Auto-start with supervisord (program name shown for sh0-30; adjust the suffix on sh0-31):

vim /etc/supervisord.d/kube-controller-manager.ini
[program:kube-controller-manager-0-30]
command=/opt/kubernetes/server/bin/kube-controller-manager-startup.sh             ; the program (relative uses PATH, can take args)
numprocs=1                                                                        ; number of processes copies to start (def 1)
directory=/opt/kubernetes/server/bin                                              ; directory to cwd to before exec (def no cwd)
autostart=true                                                                    ; start at supervisord start (default: true)
autorestart=true                                                                  ; retstart at unexpected quit (default: true)
startsecs=30                                                                      ; number of secs prog must stay running (def. 1)
startretries=3                                                                    ; max # of serial start failures (default 3)
exitcodes=0,2                                                                     ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT                                                                   ; signal used to kill process (default TERM)
stopwaitsecs=10                                                                   ; max num secs to wait b4 SIGKILL (default 10)
user=root                                                                         ; setuid to this UNIX account to run the program
redirect_stderr=true                                                              ; redirect proc stderr to stdout (default false)
stdout_logfile=/data/logs/kubernetes/kube-controller-manager/controller.stdout.log  ; stdout log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB                                                      ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4                                                          ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB                                                       ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false                                                       ; emit events on stdout writes (default false)

Load the new program:

supervisorctl update
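
At this point both master components are under supervisord control; a quick check that they stay RUNNING and that the controller manager is logging (log path as configured in the ini above):

supervisorctl status
tail -n 5 /data/logs/kubernetes/kube-controller-manager/controller.stdout.log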

Author: Linty
Copyright: Unless otherwise stated, all articles on this blog are licensed under CC BY 4.0. Please credit Linty when reposting.