[root@master01 k8s]# cd /mnt/    //enter the host's mount directory
[root@master01 mnt]# ls
etcd-cert etcd-v3.3.10-linux-amd64.tar.gz k8s-cert.sh master.zip
etcd-cert.sh flannel.sh kubeconfig.sh node.zip
etcd.sh flannel-v0.10.0-linux-amd64.tar.gz kubernetes-server-linux-amd64.tar.gz
[root@master01 mnt]# cp master.zip /root/k8s/    //copy the archive to the k8s working directory
[root@master01 mnt]# cd /root/k8s/    //enter the k8s working directory
[root@master01 k8s]# ls
cfssl.sh etcd-v3.3.10-linux-amd64 kubernetes-server-linux-amd64.tar.gz
etcd-cert etcd-v3.3.10-linux-amd64.tar.gz master.zip
etcd.sh flannel-v0.10.0-linux-amd64.tar.gz
[root@master01 k8s]# unzip master.zip    //unpack the archive
Archive: master.zip
inflating: apiserver.sh
inflating: controller-manager.sh
inflating: scheduler.sh
[root@master01 k8s]# mkdir /opt/kubernetes/{cfg,bin,ssl} -p    //create the working directories on master01 (the same directories were created earlier on the node machines)
[root@master01 k8s]# mkdir k8s-cert    //create a directory for the self-signed certificates
[root@master01 k8s]# cp /mnt/k8s-cert.sh /root/k8s/k8s-cert    //copy the mounted certificate script into the certificate directory under the k8s working directory
[root@master01 k8s]# cd k8s-cert    //enter that directory
[root@master01 k8s-cert]# vim k8s-cert.sh    //edit the copied script (the // notes below are annotations, not part of the file)
...
cat > server-csr.json <<EOF
{
"CN": "kubernetes",
"hosts": [
"10.0.0.1",
"127.0.0.1",
"192.168.80.12", //更改地址為master01IP地址
"192.168.80.11", //添加地址為master02IP地址,為之后我們要做的多節(jié)點(diǎn)做準(zhǔn)備
"192.168.80.100", //添加vrrp地址,為之后要做的負(fù)載均衡做準(zhǔn)備
"192.168.80.13", //更改地址為node01節(jié)點(diǎn)IP地址
"192.168.80.14", //更改地址為node02節(jié)點(diǎn)IP地址
"kubernetes",
"kubernetes.default",
"kubernetes.default.svc",
"kubernetes.default.svc.cluster",
"kubernetes.default.svc.cluster.local"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "BeiJing",
"ST": "BeiJing",
"O": "k8s",
"OU": "System"
}
]
}
EOF
...
:wq
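The rest of k8s-cert.sh is elided above ("..."). For reference, scripts of this kind typically end by signing each CSR with cfssl; a minimal sketch for the server certificate, assuming a ca-config.json with a "kubernetes" profile sits in the same directory (both names are assumptions, adjust to the script's actual contents):
# sketch only: ca-config.json and the profile name are assumed
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json \
  -profile=kubernetes server-csr.json | cfssljson -bare server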
[root@master01 k8s-cert]# bash k8s-cert.sh    //run the script to generate the certificates
2020/02/10 10:59:17 [INFO] generating a new CA key and certificate from CSR
2020/02/10 10:59:17 [INFO] generate received request
2020/02/10 10:59:17 [INFO] received CSR
2020/02/10 10:59:17 [INFO] generating key: rsa-2048
2020/02/10 10:59:17 [INFO] encoded CSR
2020/02/10 10:59:17 [INFO] signed certificate with serial number 10087572098424151492431444614087300651068639826
2020/02/10 10:59:17 [INFO] generate received request
2020/02/10 10:59:17 [INFO] received CSR
2020/02/10 10:59:17 [INFO] generating key: rsa-2048
2020/02/10 10:59:17 [INFO] encoded CSR
2020/02/10 10:59:17 [INFO] signed certificate with serial number 125779224158375570229792859734449149781670193528
2020/02/10 10:59:17 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
2020/02/10 10:59:17 [INFO] generate received request
2020/02/10 10:59:17 [INFO] received CSR
2020/02/10 10:59:17 [INFO] generating key: rsa-2048
2020/02/10 10:59:17 [INFO] encoded CSR
2020/02/10 10:59:17 [INFO] signed certificate with serial number 328087687681727386760831073265687413205940136472
2020/02/10 10:59:17 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
2020/02/10 10:59:17 [INFO] generate received request
2020/02/10 10:59:17 [INFO] received CSR
2020/02/10 10:59:17 [INFO] generating key: rsa-2048
2020/02/10 10:59:18 [INFO] encoded CSR
2020/02/10 10:59:18 [INFO] signed certificate with serial number 525069068228188747147886102005817997066385735072
2020/02/10 10:59:18 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
[root@master01 k8s-cert]# ls *pem    //check: eight certificate files should be generated
admin-key.pem admin.pem ca-key.pem ca.pem kube-proxy-key.pem kube-proxy.pem server-key.pem server.pem
[root@master01 k8s-cert]# cp ca*pem server*pem /opt/kubernetes/ssl/    //copy the CA and server certificates into the ssl directory under the k8s working directory
Configure the apiserver
[root@master01 k8s-cert]# cd ..    //return to the k8s working directory
[root@master01 k8s]# tar zxvf kubernetes-server-linux-amd64.tar.gz    //unpack the package
kubernetes/
kubernetes/server/
kubernetes/server/bin/
...
[root@master01 k8s]# cd kubernetes/server/bin/    //enter the directory holding the unpacked binaries
[root@master01 bin]# ls
apiextensions-apiserver kube-apiserver.docker_tag kube-proxy
cloud-controller-manager kube-apiserver.tar kube-proxy.docker_tag
cloud-controller-manager.docker_tag kube-controller-manager kube-proxy.tar
cloud-controller-manager.tar kube-controller-manager.docker_tag kube-scheduler
hyperkube kube-controller-manager.tar kube-scheduler.docker_tag
kubeadm kubectl kube-scheduler.tar
kube-apiserver kubelet mounter
[root@master01 bin]# cp kube-apiserver kubectl kube-controller-manager kube-scheduler /opt/kubernetes/bin/    //copy the key binaries into the bin directory of the k8s working directory
[root@master01 bin]# cd /root/k8s/
[root@master01 k8s]# head -c 16 /dev/urandom | od -An -t x | tr -d ' '    //generate a random token
c37758077defd4033bfe95a071689272
[root@master01 k8s]# vim /opt/kubernetes/cfg/token.csv    //create token.csv, which effectively defines a bootstrap role
c37758077defd4033bfe95a071689272,kubelet-bootstrap,10001,"system:kubelet-bootstrap"    //token, user name, uid and group; the first field is the token generated above
:wq
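The same file can also be written non-interactively; a minimal sketch, assuming the token is first captured in a shell variable (the variable name is illustrative):
# equivalent non-interactive version; BOOTSTRAP_TOKEN is an illustrative name
BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
cat > /opt/kubernetes/cfg/token.csv <<EOF
${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF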
[root@master01 k8s]# bash apiserver.sh 192.168.80.12 https://192.168.80.12:2379,https://192.168.80.13:2379,https://192.168.80.14:2379    //with the binaries, token and certificates in place, run the apiserver script, which also generates the configuration file
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-apiserver.service to /usr/lib/systemd/system/kube-apiserver.service.
[root@master01 k8s]# ps aux | grep kube    //check that the process started successfully
root 17088 8.7 16.7 402260 312192 ? Ssl 11:17 0:08 /opt/kubernetes/bin/kube-apiserver --logtostderr=true --v=4 --etcd-servers=https://192.168.80.12:2379,https://192.168.80.13:2379,https://192.168.80.14:2379 --bind-address=192.168.80.12 --secure-port=6443 --advertise-address=192.168.80.12 --allow-privileged=true --service-cluster-ip-range=10.0.0.0/24 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction --authorization-mode=RBAC,Node --kubelet-https=true --enable-bootstrap-token-auth --token-auth-file=/opt/kubernetes/cfg/token.csv --service-node-port-range=30000-50000 --tls-cert-file=/opt/kubernetes/ssl/server.pem --tls-private-key-file=/opt/kubernetes/ssl/server-key.pem --client-ca-file=/opt/kubernetes/ssl/ca.pem --service-account-key-file=/opt/kubernetes/ssl/ca-key.pem --etcd-cafile=/opt/etcd/ssl/ca.pem --etcd-certfile=/opt/etcd/ssl/server.pem --etcd-keyfile=/opt/etcd/ssl/server-key.pem
root 17101 0.0 0.0 112676 980 pts/0 S+ 11:19 0:00 grep --color=auto kube
[root@master01 k8s]# cat /opt/kubernetes/cfg/kube-apiserver    //view the generated configuration file
KUBE_APISERVER_OPTS="--logtostderr=true \
--v=4 \
--etcd-servers=https://192.168.80.12:2379,https://192.168.80.13:2379,https://192.168.80.14:2379 \
--bind-address=192.168.80.12 \
--secure-port=6443 \
--advertise-address=192.168.80.12 \
--allow-privileged=true \
--service-cluster-ip-range=10.0.0.0/24 \
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \
--authorization-mode=RBAC,Node \
--kubelet-https=true \
--enable-bootstrap-token-auth \
--token-auth-file=/opt/kubernetes/cfg/token.csv \
--service-node-port-range=30000-50000 \
--tls-cert-file=/opt/kubernetes/ssl/server.pem \
--tls-private-key-file=/opt/kubernetes/ssl/server-key.pem \
--client-ca-file=/opt/kubernetes/ssl/ca.pem \
--service-account-key-file=/opt/kubernetes/ssl/ca-key.pem \
--etcd-cafile=/opt/etcd/ssl/ca.pem \
--etcd-certfile=/opt/etcd/ssl/server.pem \
--etcd-keyfile=/opt/etcd/ssl/server-key.pem"
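Besides this configuration file, apiserver.sh also writes the systemd unit referenced by the symlink message above. A minimal sketch of what such a unit typically looks like (the exact content is an assumption; verify against /usr/lib/systemd/system/kube-apiserver.service on your system):
# sketch only - compare with the unit actually generated by apiserver.sh
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-apiserver
ExecStart=/opt/kubernetes/bin/kube-apiserver $KUBE_APISERVER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target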
[root@master01 k8s]# netstat -ntap | grep 6443    //check that the secure port is listening
tcp 0 0 192.168.80.12:6443 0.0.0.0:* LISTEN 17088/kube-apiserve
tcp 0 0 192.168.80.12:48320 192.168.80.12:6443 ESTABLISHED 17088/kube-apiserve
tcp 0 0 192.168.80.12:6443 192.168.80.12:48320 ESTABLISHED 17088/kube-apiserve
[root@master01 k8s]# netstat -ntap | grep 8080    //check that the local insecure port is listening
tcp 0 0 127.0.0.1:8080 0.0.0.0:* LISTEN 17088/kube-apiserve
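As an additional sanity check (not part of the original steps), the insecure local port serves plain HTTP without authentication, so a quick request to a well-known endpoint confirms the apiserver responds:
# optional check against the local insecure port
curl http://127.0.0.1:8080/version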
[root@master01 k8s]# ./scheduler.sh 127.0.0.1    //run the script to start the service and generate its configuration file
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-scheduler.service to /usr/lib/systemd/system/kube-scheduler.service.
[root@master01 k8s]# systemctl status kube-scheduler.service    //check the service status
● kube-scheduler.service - Kubernetes Scheduler
Loaded: loaded (/usr/lib/systemd/system/kube-scheduler.service; enabled; vendor preset: disabled)
Active: active (running) since 一 2020-02-10 11:22:13 CST; 2min 46s ago    //running successfully
Docs: https://github.com/kubernetes/kubernetes
...
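scheduler.sh is passed 127.0.0.1 because the scheduler reaches the apiserver through the local insecure port 8080. The configuration it generates is typically along these lines (a sketch only; the exact flags are an assumption, check /opt/kubernetes/cfg/kube-scheduler):
# sketch - verify against the file generated by scheduler.sh
KUBE_SCHEDULER_OPTS="--logtostderr=true \
--v=4 \
--master=127.0.0.1:8080 \
--leader-elect"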
[root@master01 k8s]# chmod +x controller-manager.sh    //make the script executable
[root@master01 k8s]# ./controller-manager.sh 127.0.0.1    //run the script to start the service and generate its configuration file
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-controller-manager.service to /usr/lib/systemd/system/kube-controller-manager.service.
[root@master01 k8s]# systemctl status kube-controller-manager.service    //check the service status
● kube-controller-manager.service - Kubernetes Controller Manager
Loaded: loaded (/usr/lib/systemd/system/kube-controller-manager.service; enabled; vendor preset: disabled)
Active: active (running) since 一 2020-02-10 11:28:21 CST; 7min ago    //running successfully
...
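Like the scheduler, the controller manager connects to the apiserver via 127.0.0.1:8080, and its configuration usually carries the cluster CA and key so it can sign kubelet certificates and service-account tokens. A sketch of what /opt/kubernetes/cfg/kube-controller-manager typically contains in this kind of setup (flags are an assumption; check the generated file):
# sketch - verify against the file generated by controller-manager.sh
KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=true \
--v=4 \
--master=127.0.0.1:8080 \
--leader-elect=true \
--address=127.0.0.1 \
--service-cluster-ip-range=10.0.0.0/24 \
--cluster-signing-cert-file=/opt/kubernetes/ssl/ca.pem \
--cluster-signing-key-file=/opt/kubernetes/ssl/ca-key.pem \
--root-ca-file=/opt/kubernetes/ssl/ca.pem \
--service-account-private-key-file=/opt/kubernetes/ssl/ca-key.pem"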
[root@master01 k8s]# /opt/kubernetes/bin/kubectl get cs    //check the health of the cluster components (componentstatuses)
NAME STATUS MESSAGE ERROR
scheduler Healthy ok
controller-manager Healthy ok
etcd-2 Healthy {"health":"true"}
etcd-0 Healthy {"health":"true"}
etcd-1 Healthy {"health":"true"}
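kubectl is invoked with its full path above. As an optional convenience (not part of the original procedure), the binary directory can be added to PATH so the command is available directly:
# optional convenience step
echo 'export PATH=$PATH:/opt/kubernetes/bin' >> /etc/profile
source /etc/profile
kubectl get cs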
免責(zé)聲明:本站發(fā)布的內(nèi)容(圖片、視頻和文字)以原創(chuàng)、轉(zhuǎn)載和分享為主,文章觀點(diǎn)不代表本網(wǎng)站立場(chǎng),如果涉及侵權(quán)請(qǐng)聯(lián)系站長(zhǎng)郵箱:is@yisu.com進(jìn)行舉報(bào),并提供相關(guān)證據(jù),一經(jīng)查實(shí),將立刻刪除涉嫌侵權(quán)內(nèi)容。