{"id":6618,"date":"2022-04-09T22:35:30","date_gmt":"2022-04-09T14:35:30","guid":{"rendered":"https:\/\/egonlin.com\/?p=6618"},"modified":"2024-04-29T22:09:02","modified_gmt":"2024-04-29T14:09:02","slug":"kubeadm%e5%ae%89%e8%a3%85k8s","status":"publish","type":"post","link":"https:\/\/egonlin.com\/?p=6618","title":{"rendered":"kubeadm\u5b89\u88c5k8s"},"content":{"rendered":"<h1>kubeadm\u5b89\u88c5k8s<\/h1>\n<h2>\u4e00\u3001\u7b80\u4ecb<\/h2>\n<pre><code> Kubernetes\u6709\u4e24\u79cd\u65b9\u5f0f\uff0c\u7b2c\u4e00\u79cd\u662f\u4e8c\u8fdb\u5236\u7684\u65b9\u5f0f\uff0c\u53ef\u5b9a\u5236\u4f46\u662f\u90e8\u7f72\u590d\u6742\u5bb9\u6613\u51fa\u9519\uff1b\u7b2c\u4e8c\u79cd\u662fkubeadm\u5de5\u5177\u5b89\u88c5\uff0c\u90e8\u7f72\u7b80\u5355\uff0c\u4e0d\u53ef\u5b9a\u5236\u5316\u3002\u672c\u6b21\u6211\u4eec\u90e8\u7f72kubeadm\u7248.\r\n \u670d\u52a1\u5668\u914d\u7f6e\u81f3\u5c11\u662f2G2\u6838\u7684\u3002\u5982\u679c\u4e0d\u662f\u5219\u53ef\u4ee5\u5728\u96c6\u7fa4\u521d\u59cb\u5316\u540e\u9762\u589e\u52a0 --ignore-preflight-errors=NumCPU<\/code><\/pre>\n<h2>\u4e8c\u3001\u90e8\u7f72\u89c4\u5212<\/h2>\n<h3>1\u3001\u7248\u672c\u89c4\u5212<\/h3>\n<p><div class='fancybox-wrapper lazyload-container-unload' data-fancybox='post-images' href='https:\/\/egonlin.com\/wp-content\/uploads\/2022\/04\/kubeadm\u5b89\u88c5k8s1.png'><img class=\"lazyload lazyload-style-2\" src=\"data:image\/svg+xml;base64,PCEtLUFyZ29uTG9hZGluZy0tPgo8c3ZnIHdpZHRoPSIxIiBoZWlnaHQ9IjEiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgc3Ryb2tlPSIjZmZmZmZmMDAiPjxnPjwvZz4KPC9zdmc+\"  data-original=\"https:\/\/egonlin.com\/wp-content\/uploads\/2022\/04\/kubeadm\u5b89\u88c5k8s1.png\" src=\"data:image\/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAADsQAAA7EAZUrDhsAAAANSURBVBhXYzh8+PB\/AAffA0nNPuCLAAAAAElFTkSuQmCC\" alt=\"\" \/><\/div><\/p>\n<h3>2\u3001\u8282\u70b9\u89c4\u5212<\/h3>\n<p><div class='fancybox-wrapper lazyload-container-unload' 
data-fancybox='post-images' href='https:\/\/egonlin.com\/wp-content\/uploads\/2022\/04\/kubeadm\u5b89\u88c5k8s2.png'><img class=\"lazyload lazyload-style-2\" src=\"data:image\/svg+xml;base64,PCEtLUFyZ29uTG9hZGluZy0tPgo8c3ZnIHdpZHRoPSIxIiBoZWlnaHQ9IjEiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgc3Ryb2tlPSIjZmZmZmZmMDAiPjxnPjwvZz4KPC9zdmc+\"  data-original=\"https:\/\/egonlin.com\/wp-content\/uploads\/2022\/04\/kubeadm\u5b89\u88c5k8s2.png\" src=\"data:image\/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAADsQAAA7EAZUrDhsAAAANSURBVBhXYzh8+PB\/AAffA0nNPuCLAAAAAElFTkSuQmCC\" alt=\"\" \/><\/div><\/p>\n<h2>\u4e09\u3001\u4fee\u6539\u7f51\u7edc\u53ca\uff08\u4e09\u53f0\u4e3b\u673a\uff09<\/h2>\n<h3>1\u3001\u4fee\u6539\u865a\u62df\u7f51\u7edc\u7f16\u8f91\u5668<\/h3>\n<p><div class='fancybox-wrapper lazyload-container-unload' data-fancybox='post-images' href='https:\/\/egonlin.com\/wp-content\/uploads\/2022\/04\/kubeadm\u5b89\u88c5k8s3.png'><img class=\"lazyload lazyload-style-2\" src=\"data:image\/svg+xml;base64,PCEtLUFyZ29uTG9hZGluZy0tPgo8c3ZnIHdpZHRoPSIxIiBoZWlnaHQ9IjEiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgc3Ryb2tlPSIjZmZmZmZmMDAiPjxnPjwvZz4KPC9zdmc+\"  data-original=\"https:\/\/egonlin.com\/wp-content\/uploads\/2022\/04\/kubeadm\u5b89\u88c5k8s3.png\" src=\"data:image\/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAADsQAAA7EAZUrDhsAAAANSURBVBhXYzh8+PB\/AAffA0nNPuCLAAAAAElFTkSuQmCC\" alt=\"\" \/><\/div><\/p>\n<p><div class='fancybox-wrapper lazyload-container-unload' data-fancybox='post-images' href='https:\/\/egonlin.com\/wp-content\/uploads\/2022\/04\/kubeadm\u5b89\u88c5k8s4.png'><img class=\"lazyload lazyload-style-2\" src=\"data:image\/svg+xml;base64,PCEtLUFyZ29uTG9hZGluZy0tPgo8c3ZnIHdpZHRoPSIxIiBoZWlnaHQ9IjEiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgc3Ryb2tlPSIjZmZmZmZmMDAiPjxnPjwvZz4KPC9zdmc+\"  
data-original=\"https:\/\/egonlin.com\/wp-content\/uploads\/2022\/04\/kubeadm\u5b89\u88c5k8s4.png\" src=\"data:image\/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAADsQAAA7EAZUrDhsAAAANSURBVBhXYzh8+PB\/AAffA0nNPuCLAAAAAElFTkSuQmCC\" alt=\"\" \/><\/div><\/p>\n<h3>2\u3001\u514b\u9686\u4e3b\u673a<\/h3>\n<pre><code> 192.168.15.31   k8s-m-01 m1\r\n 192.168.15.32   k8s-n-01 n1\r\n 192.168.15.33   k8s-n-02 n2<\/code><\/pre>\n<h3>3\u3001\u4fee\u6539\u4e09\u53f0\u5b9e\u4f8bIP\u7f51\u5173<\/h3>\n<p><div class='fancybox-wrapper lazyload-container-unload' data-fancybox='post-images' href='https:\/\/egonlin.com\/wp-content\/uploads\/2022\/04\/kubeadm\u5b89\u88c5k8s5.png'><img class=\"lazyload lazyload-style-2\" src=\"data:image\/svg+xml;base64,PCEtLUFyZ29uTG9hZGluZy0tPgo8c3ZnIHdpZHRoPSIxIiBoZWlnaHQ9IjEiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgc3Ryb2tlPSIjZmZmZmZmMDAiPjxnPjwvZz4KPC9zdmc+\"  data-original=\"https:\/\/egonlin.com\/wp-content\/uploads\/2022\/04\/kubeadm\u5b89\u88c5k8s5.png\" src=\"data:image\/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAADsQAAA7EAZUrDhsAAAANSURBVBhXYzh8+PB\/AAffA0nNPuCLAAAAAElFTkSuQmCC\" alt=\"\" \/><\/div><\/p>\n<p><strong>\u5185\u7f51eth1\u4e5f\u8981\u4e0d\u540cIP\u5426\u5219\u4e09\u53f0\u673a\u5668IP\u51b2\u7a81\u3002\u4fee\u6539\u5b8c\u6210\u540e\u91cd\u542f\u7f51\u5361\uff0cping baidu.com \u67e5\u770b\u7f51\u7edc\u662f\u5426\u7545\u901a<\/strong><\/p>\n<h2>\u56db\u3001\u4fee\u6539\u4e3b\u673a\u540d\u53ca\u89e3\u6790(\u4e09\u53f0\u8282\u70b9)<\/h2>\n<h3>1\u3001\u4fee\u6539\u4e3b\u673a\u540d<\/h3>\n<pre><code> hostnamectl set-hostname k8s-master-01\r\n hostnamectl set-hostname k8s-node-01\r\n hostnamectl set-hostname k8s-node-02<\/code><\/pre>\n<h3>2\u3001\u6dfb\u52a0host\u89e3\u6790<\/h3>\n<pre><code> cat \/etc\/hosts\r\n 192.168.15.31  k8s-master-01 m1\r\n 192.168.15.32  k8s-node-01   n1\r\n 192.168.15.33  k8s-node-02 
  n2<\/code><\/pre>\n<h3>3\u3001\u6dfb\u52a0DNS\u89e3\u6790<\/h3>\n<pre><code> [root@k8s-master-01 ~]# vim \/etc\/resolv.conf\r\n # Generated by NetworkManager\r\n nameserver 223.5.5.5\r\n nameserver 114.114.114.114<\/code><\/pre>\n<h2>\u4e94\u3001\u7cfb\u7edf\u4f18\u5316(\u4e09\u4e2a\u8282\u70b9\u5168\u505a)<\/h2>\n<h3>1\u3001\u5173\u95edselinux<\/h3>\n<pre><code class=\"language-yaml\"># \u6c38\u4e45\u5173\u95ed\r\n sed -i 's#enforcing#disabled#g' \/etc\/selinux\/config\r\n \u200b\r\n #\u4e34\u65f6\u5173\u95ed\r\n setenforce 0<\/code><\/pre>\n<h3>2\u3001\u5173\u95ed\u9632\u706b\u5899<\/h3>\n<pre><code class=\"language-yaml\"> systemctl disable --now firewalld<\/code><\/pre>\n<h3>3\u3001\u5173\u95edswap\u5206\u533a<\/h3>\n<pre><code class=\"language-yaml\"> # \u5173\u95edswap\u5206\u533a\r\n swapoff -a \r\n \u200b\r\n # kubelet\u5ffd\u7565swap\r\n echo 'KUBELET_EXTRA_ARGS=\"--fail-swap-on=false\"' &gt; \/etc\/sysconfig\/kubelet\r\n \u200b\r\n # \u6ce8\u91caswap\u5206\u533a\r\n vim \/etc\/fstab<\/code><\/pre>\n<h3>4\u3001\u505a\u514d\u5bc6\u767b\u5f55(\u4e3b\u8282\u70b9\u505a)<\/h3>\n<pre><code class=\"language-yaml\">[root@k8s-master-01 ~]# rm -rf \/root\/.ssh\r\n[root@k8s-master-01 ~]# ssh-keygen      \u4ea4\u4e92\u5f0f\u76f4\u63a5\u5168\u90e8\u56de\u8f66\r\n[root@k8s-master-01 ~]# cd \/root\/.ssh\/\r\n[root@k8s-master-01 ~\/.ssh]# mv id_rsa.pub authorized_keys\r\n[root@k8s-master-01 ~\/.ssh]# scp  -r  \/root\/.ssh  192.168.15.32:\/root\r\n[root@k8s-master-01 ~\/.ssh]# scp  -r  \/root\/.ssh  192.168.15.33:\/root<\/code><\/pre>\n<h3>5\u3001\u540c\u6b65\u96c6\u7fa4\u65f6\u95f4<\/h3>\n<pre><code class=\"language-yaml\">echo '#Timing synchronization time' &gt;&gt;\/var\/spool\/cron\/root  #\u7ed9\u5b9a\u65f6\u4efb\u52a1\u52a0\u4e0a\u6ce8\u91ca\r\necho '0 *\/1 * * * \/usr\/sbin\/ntpdate ntp1.aliyun.com &amp;&gt;\/dev\/null' &gt;&gt;\/var\/spool\/cron\/root     #\u8bbe\u7f6e\u5b9a\u65f6\u4efb\u52a1\r\ncrontab -l  
#\u68c0\u67e5\u7ed3\u679c<\/code><\/pre>\n<h3>6\u3001\u66f4\u65b0yum\u6e90<\/h3>\n<pre><code class=\"language-yaml\">rm -rf \/etc\/yum.repos.d\/*\r\n\r\ncurl -o \/etc\/yum.repos.d\/CentOS-Base.repo https:\/\/repo.huaweicloud.com\/repository\/conf\/CentOS-7-reg.repo\r\n\r\nyum remove epel-release\r\nrm -rf  \/var\/cache\/yum\/x86_64\/6\/epel\/\r\nyum install -y https:\/\/repo.huaweicloud.com\/epel\/epel-release-latest-7.noarch.rpm\r\n\r\nsed -i \"s\/#baseurl\/baseurl\/g\" \/etc\/yum.repos.d\/epel.repo\r\nsed -i \"s\/metalink\/#metalink\/g\" \/etc\/yum.repos.d\/epel.repo\r\nsed -i \"s@https\\?:\/\/download.fedoraproject.org\/pub@https:\/\/repo.huaweicloud.com@g\" \/etc\/yum.repos.d\/epel.repo\r\n\r\nyum clean all\r\nyum makecache<\/code><\/pre>\n<h3>7\u3001\u66f4\u65b0\u7cfb\u7edf\u8f6f\u4ef6(\u6392\u9664\u5185\u6838)<\/h3>\n<pre><code class=\"language-yaml\">yum update -y --exclude=kernel*<\/code><\/pre>\n<h3>8\u3001\u5b89\u88c5\u57fa\u7840\u5e38\u7528\u8f6f\u4ef6<\/h3>\n<pre><code class=\"language-yaml\">yum install wget expect vim net-tools ntp bash-completion ipvsadm ipset jq iptables conntrack sysstat libseccomp -y<\/code><\/pre>\n<h3>9\u3001\u66f4\u65b0\u7cfb\u7edf\u5185\u6838<\/h3>\n<p>\uff08docker \u5bf9\u7cfb\u7edf\u5185\u6838\u8981\u6c42\u6bd4\u8f83\u9ad8\uff0c\u6700\u597d\u4f7f\u75284.4+\uff09<br \/>\n<strong>\u4e3b\u8282\u70b9\u64cd\u4f5c<\/strong><\/p>\n<pre><code class=\"language-yaml\">[root@k8s-master-01 ~]# wget https:\/\/elrepo.org\/linux\/kernel\/el7\/x86_64\/RPMS\/kernel-lt-5.4.107-1.el7.elrepo.x86_64.rpm\r\n\r\n[root@k8s-master-01 ~]# wget https:\/\/elrepo.org\/linux\/kernel\/el7\/x86_64\/RPMS\/kernel-lt-devel-5.4.107-1.el7.elrepo.x86_64.rpm\r\n\r\n[root@k8s-master-01 ~]# for i in n1 n2 m1 ; do scp kernel-lt-* $i:\/opt; done<\/code><\/pre>\n<p><strong>\u4e09\u4e2a\u8282\u70b9\u64cd\u4f5c<\/strong><\/p>\n<pre><code class=\"language-yaml\">#\u5b89\u88c5\r\nyum localinstall -y 
\/opt\/kernel-lt*\r\n\r\n#\u8c03\u5230\u9ed8\u8ba4\u542f\u52a8\r\ngrub2-set-default 0 &amp;&amp; grub2-mkconfig -o \/etc\/grub2.cfg \r\n\r\n#\u67e5\u770b\u5f53\u524d\u9ed8\u8ba4\u542f\u52a8\u7684\u5185\u6838\r\ngrubby --default-kernel\r\n\r\n#\u91cd\u542f\u7cfb\u7edf\r\nreboot<\/code><\/pre>\n<h3>10\u3001\u5b89\u88c5IPVS<\/h3>\n<pre>1\uff09yum\u5b89\u88c5\r\nyum install -y conntrack-tools ipvsadm ipset conntrack libseccomp \r\n2\uff09\u52a0\u8f7dIPVS\u6a21\u5757<\/pre>\n<pre class=\"md-fences mock-cm md-end-block md-fences-with-lineno\">cat &gt; \/etc\/sysconfig\/modules\/ipvs.modules &lt;&lt;\"EOF\" \r\n#!\/bin\/bash \r\nipvs_modules=\"ip_vs ip_vs_lc ip_vs_wlc ip_vs_rr ip_vs_wrr ip_vs_lblc ip_vs_lblcr ip_vs_dh ip_vs_sh ip_vs_fo ip_vs_nq ip_vs_sed ip_vs_ftp nf_conntrack\" \r\n\r\nfor kernel_module in ${ipvs_modules}; \r\ndo \r\n\t\/sbin\/modinfo -F filename ${kernel_module} &gt; \/dev\/null 2&gt;&amp;1 \r\n\tif [ $? -eq 0 ]; then \r\n\t\t\/sbin\/modprobe ${kernel_module} \r\n\tfi \r\ndone \r\nEOF\r\n\r\nchmod 755 \/etc\/sysconfig\/modules\/ipvs.modules &amp;&amp; bash \/etc\/sysconfig\/modules\/ipvs.modules &amp;&amp; lsmod | grep ip_vs\r\n<\/pre>\n<h3>11\u3001\u4fee\u6539\u5185\u6838\u542f\u52a8\u53c2\u6570\u4f18\u5316<\/h3>\n<pre><code class=\"language-yaml\">cat &gt; \/etc\/sysctl.d\/k8s.conf &lt;&lt; EOF\r\nnet.ipv4.ip_forward = 1\r\nnet.bridge.bridge-nf-call-iptables = 1\r\nnet.bridge.bridge-nf-call-ip6tables = 1\r\nfs.may_detach_mounts = 1\r\nvm.overcommit_memory=1\r\nvm.panic_on_oom=0\r\nfs.inotify.max_user_watches=89100\r\nfs.file-max=52706963\r\nfs.nr_open=52706963\r\nnet.ipv4.tcp_keepalive_time = 600\r\nnet.ipv4.tcp_keepalive_probes = 3\r\nnet.ipv4.tcp_keepalive_intvl = 15\r\nnet.ipv4.tcp_max_tw_buckets = 36000\r\nnet.ipv4.tcp_tw_reuse = 1\r\nnet.ipv4.tcp_max_orphans = 327680\r\nnet.ipv4.tcp_orphan_retries = 3\r\nnet.ipv4.tcp_syncookies = 1\r\nnet.ipv4.tcp_max_syn_backlog = 16384\r\nnet.ipv4.ip_conntrack_max = 65536\r\nnet.ipv4.tcp_max_syn_backlog = 
16384\r\nnet.ipv4.tcp_timestamps = 0\r\nnet.core.somaxconn = 16384\r\nEOF\r\n\r\n# \u7acb\u5373\u751f\u6548\r\nsysctl --system<\/code><\/pre>\n<h3>12\u3001\u5b89\u88c5docker(\u4e09\u53f0\u8282\u70b9\u90fd\u8981\u505a)<\/h3>\n<h4>1\uff09\u5378\u8f7d\u4e4b\u524d\u7684docker<\/h4>\n<pre><code>yum remove docker docker-common docker-selinux docker-engine -y<\/code><\/pre>\n<h4>2\uff09\u5b89\u88c5docker\u6240\u9700\u5b89\u88c5\u5305<\/h4>\n<pre><code>yum install -y yum-utils device-mapper-persistent-data lvm2<\/code><\/pre>\n<h4>3\uff09\u5b89\u88c5docker yum\u6e90<\/h4>\n<pre><code>wget -O \/etc\/yum.repos.d\/docker-ce.repo https:\/\/repo.huaweicloud.com\/docker-ce\/linux\/centos\/docker-ce.repo<\/code><\/pre>\n<h4>4\uff09\u5b89\u88c5docker<\/h4>\n<pre><code class=\"language-yaml\">yum install docker-ce -y\r\n# \u4fee\u6539\u914d\u7f6e\uff1a\u9a71\u52a8\u4e0ekubelet\u4fdd\u6301\u4e00\u81f4\uff0c\u5426\u5219\u4f1a\u540e\u671f\u65e0\u6cd5\u542f\u52a8kubelet\r\n{\r\n\"exec-opts\": [\"native.cgroupdriver=systemd\"],\r\n\"registry-mirrors\":[\"https:\/\/reg-mirror.qiniu.com\/\"]\r\n}<\/code><\/pre>\n<p><strong>\u4e0d\u6210\u529f\u591a\u6267\u884c\u51e0\u6b21<\/strong><\/p>\n<h4>5\uff09\u542f\u52a8\u5e76\u8bbe\u7f6e\u5f00\u673a\u81ea\u542f<\/h4>\n<pre><code>systemctl enable --now docker.service<\/code><\/pre>\n<h2>\u516d\u3001\u5b89\u88c5k8s<\/h2>\n<h3>1\u3001\u5b89\u88c5kubelet(\u6240\u6709\u8282\u70b9\u90fd\u8981\u88c5)<\/h3>\n<h4>1\uff09\u5b89\u88c5kubernetes yum \u6e90<\/h4>\n<pre><code class=\"language-yaml\">cat &gt; \/etc\/yum.repos.d\/kubernetes.repo &lt;&lt;EOF\r\n[kubernetes]\r\nname=Kubernetes\r\nbaseurl=https:\/\/mirrors.aliyun.com\/kubernetes\/yum\/repos\/kubernetes-el7-x86_64\/\r\nenabled=1\r\ngpgcheck=0\r\nrepo_gpgcheck=0\r\ngpgkey=https:\/\/mirrors.aliyun.com\/kubernetes\/yum\/doc\/yum-key.gpg 
https:\/\/mirrors.aliyun.com\/kubernetes\/yum\/doc\/rpm-package-key.gpg\r\nEOF<\/code><\/pre>\n<h4>2\uff09\u5b89\u88c5\u5e76\u542f\u52a8\u4e14\u5f00\u673a\u81ea\u542f<\/h4>\n<pre><code class=\"language-yaml\">[root@k8s-m-01 ~]# yum install -y kubelet kubeadm kubectl \r\n[root@k8s-m-01 ~]# systemctl enable --now kubelet<\/code><\/pre>\n<h3>2\u3001\u4e3b\u8282\u70b9\u64cd\u4f5c\uff08node\u8282\u70b9\u4e0d\u6267\u884c\uff09<\/h3>\n<h4>1\uff09\u521d\u59cb\u5316master\u8282\u70b9(\u4ec5\u5728master\u8282\u70b9\u4e0a\u6267\u884c)<\/h4>\n<pre><code class=\"language-yaml\">kubeadm init \\\r\n--image-repository=registry.cn-hangzhou.aliyuncs.com\/k8sos \\\r\n--kubernetes-version=v1.20.2 \\\r\n--service-cidr=10.96.0.0\/12 \\\r\n--pod-network-cidr=10.244.0.0\/16\r\n# \u4e5f\u53ef\u4ee5\u4f7f\u7528\r\n--image-repository=registry.cn-hangzhou.aliyuncs.com\/google_containers\r\n<\/code><\/pre>\n<p><strong>kubeadm init\u8fc7\u7a0b\u4e2d\u5e38\u89c1\u95ee\u9898\u5904\u7406<\/strong><br \/>\n\uff081\uff09\u7f3a\u5c11\u9ed8\u8ba4\u8def\u7531<br \/>\nroute add default gw xxx.xxx.xxx.xxx dev \u7f51\u5361\u540d<br \/>\n\uff082\uff09\u63d0\u793awarning\u4fe1\u606f\u8bf4proxy\u4ee3\u7406\u95ee\u9898\uff0c\u4f1a\u5f71\u54cd\u5b89\u88c5\uff0c\u5728scheduler\u7b49\u7ec4\u4ef6\u91cc\u4f1a\u62a5Forbidden\u9519\u8bef\uff0c\u53bb\u6389\u8be5warnning\u4fe1\u606f\u5982\u4e0b<\/p>\n<pre><code class=\"language-yaml\"># \u6b65\u9aa41\r\nvim \/etc\/profile\r\nexport no_proxy=127.0.0.1,\u672c\u673aip\u5730\u5740\r\n\r\n# \u6b65\u9aa42\r\nsource \/etc\/profile<\/code><\/pre>\n<p>\uff083\uff09kubeadm\u9ed8\u8ba4\u955c\u50cf\u62c9\u53d6\u5730\u5740\u4e3ak8s.gcr.io\uff0c\u8bf7\u4f7f\u7528\u6307\u5b9a\u4ed3\u5e93<\/p>\n<p>\uff084\uff09\u9a71\u52a8\u4e0d\u5339\u914d\u95ee\u9898\uff0csystemd<br \/>\n<div class='fancybox-wrapper lazyload-container-unload' data-fancybox='post-images' href='https:\/\/egonlin.com\/wp-content\/uploads\/2022\/04\/kubeadm\u5b89\u88c5k8s7.png'><img class=\"lazyload lazyload-style-2\" 
src=\"data:image\/svg+xml;base64,PCEtLUFyZ29uTG9hZGluZy0tPgo8c3ZnIHdpZHRoPSIxIiBoZWlnaHQ9IjEiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgc3Ryb2tlPSIjZmZmZmZmMDAiPjxnPjwvZz4KPC9zdmc+\"  data-original=\"https:\/\/egonlin.com\/wp-content\/uploads\/2022\/04\/kubeadm\u5b89\u88c5k8s7.png\" src=\"data:image\/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAADsQAAA7EAZUrDhsAAAANSURBVBhXYzh8+PB\/AAffA0nNPuCLAAAAAElFTkSuQmCC\" alt=\"\" \/><\/div><\/p>\n<p>\u6b65\u9aa41\u3001\u4fee\u6539docker\u914d\u7f6e\uff0c\u5e76\u91cd\u590d\u670d\u52a1<\/p>\n<pre><code class=\"language-yaml\">\u7f16\u8f91\/etc\/docker\/daemon.json\u65b0\u589e\u914d\u7f6e\uff0c\u91cd\u542fdocker\r\n{\r\n......\r\n\"exec-opts\": [\"native.cgroupdriver=systemd\"],\r\n......\r\n}\r\n\u6ce8\u610f\uff01\uff01\uff01\uff1adaemon.json\u4e2dlive-restore: true\u7684\u60c5\u51b5\u4e0b\u624d\u53ef\u4ee5\u91cd\u542fdocker\uff0c\u5426\u5219\u4f1a\u5bfc\u81f4\u5bb9\u5668\u6302\u6389<\/code><\/pre>\n<p>\u6b65\u9aa42\u3001\u4fee\u6539kubelet\u914d\u7f6e<\/p>\n<pre><code class=\"language-yaml\">[root@jsswx191 ~]# vi \/var\/lib\/kubelet\/config.yaml\r\n......\r\ncgroupDriver: systemd\r\n\r\n[root@jsswx191 ~]# vi \/var\/lib\/kubelet\/kubeadm-flags.env\r\nKUBELET_KUBEADM_ARGS=\"--cgroup-driver=systemd --network-plugin=cni --pod-infra-container-image=k8s.gcr.io\/pause:3.1\"<\/code><\/pre>\n<p>\u6b65\u9aa43\u3001\u91cd\u590ddocker\u4e0ekubelet<\/p>\n<pre><code class=\"language-yaml\">systemctl daemon-reload\r\nsystemctl restart docker\r\nsystemctl restart kubelet<\/code><\/pre>\n<p>\u6b65\u9aa44\u3001\u68c0\u67e5swap\u662f\u5426\u5173\u95ed<\/p>\n<pre><code class=\"language-yaml\">swapoff -a<\/code><\/pre>\n<p>\u6b65\u9aa45\u3001\u6700\u540e\u3001\u68c0\u67e5<\/p>\n<pre><code class=\"language-yaml\">[root@jsswx191 ~]# docker info|grep \"Cgroup Driver\" \u662f\u5426\u8f93\u51fa Cgroup Driver: systemd\r\n\r\n[root@xxx ~]# ps aux |grep  \/usr\/bin\/kubelet |grep -v grep 
\r\n\r\nroot 581806 17.6 0.0 5633952 131056 ? Ssl 14:27 9:05 \/usr\/bin\/kubelet --bootstrap-kubeconfig=\/etc\/kubernetes\/bootstrap-kubelet.conf --kubeconfig=\/etc\/kubernetes\/kubelet.conf --config=\/var\/lib\/kubelet\/config.yaml --cgroup-driver=systemd --network-plugin=cni --pod-infra-container-image=k8s.gcr.io\/pause:3.1 --fail-swap-on=false<\/code><\/pre>\n<p>\uff085\uff09kubeadm init\u62c9\u8d77\u96c6\u7fa4\u8d85\u65f6\uff0cpause\u5bb9\u5668\u4e0d\u505c\u5730created\uff0c\u6709\u53ef\u80fddocker\u7684host\u7f51\u7edc\u7f3a\u5931\u95ee\u9898\uff0c\u6392\u9519\u65b9\u6cd5<\/p>\n<pre><code class=\"language-yaml\">docker container ls -a |grep pause\r\ndocker inspect \u5bb9\u5668ID\r\n\u5982\u679c\u53d1\u73b0docker\u7684host\u7f51\u7edc\u7f3a\u5931\u95ee\u9898\uff0c\u5bfc\u81f4pause\u5bb9\u5668\u521b\u5efa\u5931\u8d25\uff0c\u540e\u7eed\u7684\u7ec4\u4ef6\u5bb9\u5668\u90fd\u62c9\u4e0d\u8d77\u6765\uff0c\u9700\u8981\u624b\u52a8\u91cd\u5efadocker\u7f51\u7edc<\/code><\/pre>\n<p>\uff086\uff09kubectl\u547d\u4ee4\u4e0d\u53ef\u7528<\/p>\n<pre><code class=\"language-yaml\">cat \/etc\/profile\r\nls \/etc\/profile.d\/ \u4e0b\u6709\u4e00\u7cfb\u5217\u811a\u672c\u6587\u4ef6\uff0c\u53ef\u80fd\u5bf9kubectl\u505a\u4e86\u522b\u540d\uff0c\u89e3\u51b3\u65b9\u5f0f\u89c1\u56fe\r\n<\/code><\/pre>\n<p>\uff087\uff09kubeadm init\u521b\u5efa\u5b8c\u96c6\u7fa4\u540e\uff0c\u6709pod\u4e00\u76f4\u662fpending\u72b6\u6001<\/p>\n<pre><code class=\"language-yaml\">kubectl describe pod\u5982\u679c\u53d1\u73b0\u95ee\u9898\r\n3 node(s) had taints that the pod didn't tolerate.\r\n\r\nkubernetes\u51fa\u4e8e\u5b89\u5168\u8003\u8651\u9ed8\u8ba4\u60c5\u51b5\u4e0b\u65e0\u6cd5\u5728master\u8282\u70b9\u4e0a\u90e8\u7f72pod\uff0c\u4e8e\u662f\u7528\u4e0b\u9762\u65b9\u6cd5\u53bb\u6389master\u8282\u70b9\u7684\u6c61\u70b9\uff1a\r\nkubectl taint nodes --all 
node-role.kubernetes.io\/master-<\/code><\/pre>\n<p>\uff088\uff09etcd\u5bb9\u5668\u6302\u6389\uff0c\u8fdb\u800c\u5bfc\u81f4apiserver\u6302\u6389\uff1a\u5efa\u8bae\u5148docker inspect \u5bb9\u5668ID\u67e5\u770b\uff0c\u4e00\u4e2a\u53ef\u80fd\u7684\u539f\u56e0\u662f<\/p>\n<pre><code>apiserver\u76d1\u542c\u5730\u5740\u4e0d\u5bf9\uff0c\u8bf7\u68c0\u67e5\/etc\/hosts\u6587\u4ef6\u4e2d\u914d\u7f6e\u7684\u5730\u5740\u662f\u5426\u53ef\u4ee5ping\u901a\uff0c\u7136\u540ekubeadm reset -f\uff0c\u91cd\u65b0kubeadm init ...<\/code><\/pre>\n<p>\uff089\uff09\u786c\u76d8\u8d44\u6e90\u4e0d\u591f\u7528<\/p>\n<pre><code class=\"language-yaml\">systemctl status kubelet # \u62a5\u9519\uff1amust evict pod(s) to reclaim ephemeral-storage\r\ndf -Th\u67e5\u770b\u4e00\u4e0b\u5269\u4f59\u78c1\u76d8\u5bb9\u91cf\r\n\u5982\u679c\/etc\/docker\/daemon.json\u5185\u914d\u7f6e\u4e86live-restore: true\uff0c\u5219\u53ef\u4ee5\u653e\u5fc3\u8fdb\u884c\u4e0b\u8ff0\u6b65\u9aa4\uff0c\u89e3\u51b3\u5b8c\u6bd5\u540e\u91cd\u65b0kubeadm init...\uff0c\u5426\u5219\u91cd\u542fdocker\u4f1a\u6302\u6389\u5bb9\u5668\uff0c\u6b64\u65f6\u5219\u9700\u8981\u4e0e\u56e2\u961f\u540c\u4e8b\u6c9f\u901a\u662f\u5426\u53ef\u4ee5\u540e\u624d\u53ef\u8fdb\u884c\u64cd\u4f5c\r\n1\u3001\u5236\u4f5c\u65b0\u76d8\u6302\u8f7d\u5230\/data\u76ee\u5f55\uff0c\u6ce8\u610f\u6587\u4ef6\u7cfb\u7edf\u683c\u5f0f\u4e0edocker\u539f\u955c\u50cf\u76ee\u5f55\/var\/lib\/docker\u4fdd\u6301\u4e00\u81f4\r\n2\u3001systemctl stop docker\r\n3\u3001mv \/var\/lib\/docker \/data\/\r\n4\u3001ln -s \/data\/docker \/var\/lib\/docker\r\n5\u3001systemctl start docker<\/code><\/pre>\n<p>\uff0810\uff09\u62a5\u9519\uff0c\u63d0\u793a\u7aef\u53e3\u8303\u56f4\u95ee\u9898<\/p>\n<pre><code class=\"language-yaml\"># kubeadm \u66f4\u6539NodePort\u7aef\u53e3\u8303\u56f4\r\nkubernetes\u9ed8\u8ba4\u7aef\u53e3\u53f7\u8303\u56f4\u662f 30000-32767 
\uff0c\u5982\u679c\u671f\u671b\u503c\u4e0d\u662f\u8fd9\u4e2a\u533a\u95f4\u5219\u9700\u8981\u66f4\u6539\u3002\r\n1\u3001\u627e\u5230\u914d\u7f6e\u6587\u4ef6\u91cc\uff0c\u4e00\u822c\u7684\u5728\u8fd9\u4e2a\u6587\u4ef6\u5939\u4e0b\uff1a \/etc\/kubernetes\/manifests\/\r\n2\u3001\u627e\u5230\u6587\u4ef6\u540d\u4e3akube-apiserver.yaml \u7684\u6587\u4ef6\uff0c\u4e5f\u53ef\u80fd\u662fjson\u683c\u5f0f\r\n3\u3001\u7f16\u8f91\u6dfb\u52a0\u914d\u7f6e service-node-port-range=1024-65535\uff0c\u5982\u4e0b\u56fe\u6240\u793a\r\n<\/code><\/pre>\n<p>\uff0811\uff09kubectl get pods\u65f6\u62a5\u9519\uff0c\u6307\u5411\u4e00\u4e2a\u672a\u77e5\u7684ip\u5730\u5740<\/p>\n<pre><code class=\"language-yaml\"># \u62a5\u9519\u4fe1\u606f\u5982\u4e0b\r\n[xxx@xxx ~]$ kubectl get pods\r\nThe connection to the server 172.111.66.53:6443 was refused - did you specify the right host or port?\r\n\r\n# \u53ef\u80fd\u662f\u7cfb\u7edf\u73af\u5883\u88ab\u8bbe\u7f6e\u8fc7\u73af\u5883\u53d8\u91cf\uff0c\u53ef\u4ee5\u67e5\u770b\u4e00\u4e0b\r\nenv |grep -i proxy\r\n\r\n# \u7136\u540e\r\nunset http_proxy\r\nunset https_proxy\r\n\r\n# \u6700\u540e\u627e\u5230\u73af\u5883\u53d8\u91cf\u914d\u7f6e\u7684\u5730\u65b9\u4fee\u6539\r\nvi \/etc\/profile\r\n...\r\n\r\nsource \/etc\/profile<\/code><\/pre>\n<p>\uff0812\uff09\u6392\u9519\u547d\u4ee4\u6c47\u603b\uff1a<\/p>\n<pre><code class=\"language-yaml\">systemctl status kubelet\r\nsystemctl status docker\r\ndocker container ls \uff5c grep k8s\u5404\u4e2a\u7ec4\u4ef6\u7684\u5bb9\u5668 # \u6bcf\u4e2a\u5bb9\u5668\u90fd\u642d\u914d\u4e00\u4e2apause\u5bb9\u5668\r\ntail -f \/var\/log\/messages\r\n\r\n\u67e5\u770bdocker\\kubelet\u670d\u52a1\u65e5\u5fd7\r\n\u67e5\u770b\u6240\u6709\u65e5\u5fd7\uff1ajournalctl -u docker --no-pager\r\n\u67e5\u770b\u6700\u8fd1200\u6761\u65e5\u5fd7\u3010\u5206\u9875\u3011\uff1ajournalctl -u docker -n 200\r\n\u67e5\u770b\u6700\u8fd1200\u6761\u65e5\u5fd7\u3010\u4e0d\u5206\u9875\u3011\uff1ajournalctl -u docker -n 200 --no-pager\r\n\r\ndocker container ls 
-a | grep pause\r\ndocker inspect \u5bb9\u5668id\r\ndocker logs \u5bb9\u5668id<\/code><\/pre>\n<p>\uff0813\uff09\u6e05\u7406\u96c6\u7fa4\uff0c\u7136\u540e\u91cd\u65b0kubeadm init &#8230;<\/p>\n<pre><code class=\"language-yaml\">kubeadm reset -f\r\nrm -rf ~\/.kube\/\r\nrm -rf \/etc\/kubernetes\/\r\nrm -rf \/etc\/cni\r\nrm -rf \/opt\/cni\r\nrm -rf \/var\/lib\/etcd\r\nrm -rf \/var\/etcd\r\n\r\n\u9009\u505a\u9879\r\nyum clean all\r\nyum remove kube*\r\nrm -rf \/etc\/systemd\/system\/kubelet.service.d\r\nrm -rf \/etc\/systemd\/system\/kubelet.service\r\nrm -rf \/usr\/bin\/kube*<\/code><\/pre>\n<h3>2\uff09\u5efa\u7acb\u7528\u6237\u96c6\u7fa4\u6743\u9650<\/h3>\n<pre><code class=\"language-yaml\">mkdir -p $HOME\/.kube\r\nsudo cp -i \/etc\/kubernetes\/admin.conf $HOME\/.kube\/config\r\nsudo chown $(id -u):$(id -g) $HOME\/.kube\/config\r\n\r\n#\u5982\u679c\u662froot\u7528\u6237\uff0c\u5219\u53ef\u4ee5\u4f7f\u7528\uff1a\r\necho \"export KUBECONFIG=\/etc\/kubernetes\/admin.conf\" &gt;&gt; ~\/.bash_profile\r\nsource ~\/.bash_profile<\/code><\/pre>\n<h3>3\uff09\u5b89\u88c5\u96c6\u7fa4\u7f51\u7edc\u63d2\u4ef6\uff08flannel.yaml\u89c1\u9644\u4ef6\uff09<\/h3>\n<pre><code>[root@k8s-master-01 ~]#kubectl apply -f<\/code><\/pre>\n<p>flannel.yaml<\/p>\n<pre><code class=\"language-yaml\">---\r\napiVersion: policy\/v1beta1\r\nkind: PodSecurityPolicy\r\nmetadata:\r\n  name: psp.flannel.unprivileged\r\n  annotations:\r\n    seccomp.security.alpha.kubernetes.io\/allowedProfileNames: docker\/default\r\n    seccomp.security.alpha.kubernetes.io\/defaultProfileName: docker\/default\r\n    apparmor.security.beta.kubernetes.io\/allowedProfileNames: runtime\/default\r\n    apparmor.security.beta.kubernetes.io\/defaultProfileName: runtime\/default\r\nspec:\r\n  privileged: false\r\n  volumes:\r\n  - configMap\r\n  - secret\r\n  - emptyDir\r\n  - hostPath\r\n  allowedHostPaths:\r\n  - pathPrefix: \"\/etc\/cni\/net.d\"\r\n  - pathPrefix: \"\/etc\/kube-flannel\"\r\n  - pathPrefix: \"\/run\/flannel\"\r\n 
 readOnlyRootFilesystem: false\r\n  # Users and groups\r\n  runAsUser:\r\n    rule: RunAsAny\r\n  supplementalGroups:\r\n    rule: RunAsAny\r\n  fsGroup:\r\n    rule: RunAsAny\r\n  # Privilege Escalation\r\n  allowPrivilegeEscalation: false\r\n  defaultAllowPrivilegeEscalation: false\r\n  # Capabilities\r\n  allowedCapabilities: ['NET_ADMIN', 'NET_RAW']\r\n  defaultAddCapabilities: []\r\n  requiredDropCapabilities: []\r\n  # Host namespaces\r\n  hostPID: false\r\n  hostIPC: false\r\n  hostNetwork: true\r\n  hostPorts:\r\n  - min: 0\r\n    max: 65535\r\n  # SELinux\r\n  seLinux:\r\n    # SELinux is unused in CaaSP\r\n    rule: 'RunAsAny'\r\n---\r\nkind: ClusterRole\r\napiVersion: rbac.authorization.k8s.io\/v1\r\nmetadata:\r\n  name: flannel\r\nrules:\r\n- apiGroups: ['extensions']\r\n  resources: ['podsecuritypolicies']\r\n  verbs: ['use']\r\n  resourceNames: ['psp.flannel.unprivileged']\r\n- apiGroups:\r\n  - \"\"\r\n  resources:\r\n  - pods\r\n  verbs:\r\n  - get\r\n- apiGroups:\r\n  - \"\"\r\n  resources:\r\n  - nodes\r\n  verbs:\r\n  - list\r\n  - watch\r\n- apiGroups:\r\n  - \"\"\r\n  resources:\r\n  - nodes\/status\r\n  verbs:\r\n  - patch\r\n---\r\nkind: ClusterRoleBinding\r\napiVersion: rbac.authorization.k8s.io\/v1\r\nmetadata:\r\n  name: flannel\r\nroleRef:\r\n  apiGroup: rbac.authorization.k8s.io\r\n  kind: ClusterRole\r\n  name: flannel\r\nsubjects:\r\n- kind: ServiceAccount\r\n  name: flannel\r\n  namespace: kube-system\r\n---\r\napiVersion: v1\r\nkind: ServiceAccount\r\nmetadata:\r\n  name: flannel\r\n  namespace: kube-system\r\n---\r\nkind: ConfigMap\r\napiVersion: v1\r\nmetadata:\r\n  name: kube-flannel-cfg\r\n  namespace: kube-system\r\n  labels:\r\n    tier: node\r\n    app: flannel\r\ndata:\r\n  cni-conf.json: |\r\n    {\r\n      \"name\": \"cbr0\",\r\n      \"cniVersion\": \"0.3.1\",\r\n      \"plugins\": [\r\n        {\r\n          \"type\": \"flannel\",\r\n          \"delegate\": {\r\n            \"hairpinMode\": true,\r\n            
\"isDefaultGateway\": true\r\n          }\r\n        },\r\n        {\r\n          \"type\": \"portmap\",\r\n          \"capabilities\": {\r\n            \"portMappings\": true\r\n          }\r\n        }\r\n      ]\r\n    }\r\n  net-conf.json: |\r\n    {\r\n      \"Network\": \"10.244.0.0\/16\",\r\n      \"Backend\": {\r\n        \"Type\": \"vxlan\"\r\n      }\r\n    }\r\n---\r\napiVersion: apps\/v1\r\nkind: DaemonSet\r\nmetadata:\r\n  name: kube-flannel-ds\r\n  namespace: kube-system\r\n  labels:\r\n    tier: node\r\n    app: flannel\r\nspec:\r\n  selector:\r\n    matchLabels:\r\n      app: flannel\r\n  template:\r\n    metadata:\r\n      labels:\r\n        tier: node\r\n        app: flannel\r\n    spec:\r\n      affinity:\r\n        nodeAffinity:\r\n          requiredDuringSchedulingIgnoredDuringExecution:\r\n            nodeSelectorTerms:\r\n            - matchExpressions:\r\n              - key: kubernetes.io\/os\r\n                operator: In\r\n                values:\r\n                - linux\r\n      hostNetwork: true\r\n      priorityClassName: system-node-critical\r\n      tolerations:\r\n      - operator: Exists\r\n        effect: NoSchedule\r\n      serviceAccountName: flannel\r\n      initContainers:\r\n      - name: install-cni\r\n        image: registry.cn-hangzhou.aliyuncs.com\/alvinos\/flanned:v0.13.1-rc1\r\n        command:\r\n        - cp\r\n        args:\r\n        - -f\r\n        - \/etc\/kube-flannel\/cni-conf.json\r\n        - \/etc\/cni\/net.d\/10-flannel.conflist\r\n        volumeMounts:\r\n        - name: cni\r\n          mountPath: \/etc\/cni\/net.d\r\n        - name: flannel-cfg\r\n          mountPath: \/etc\/kube-flannel\/\r\n      containers:\r\n      - name: kube-flannel\r\n        image: registry.cn-hangzhou.aliyuncs.com\/alvinos\/flanned:v0.13.1-rc1\r\n        command:\r\n        - \/opt\/bin\/flanneld\r\n        args:\r\n        - --ip-masq\r\n        - --kube-subnet-mgr\r\n        resources:\r\n          requests:\r\n            
cpu: \"100m\"\r\n            memory: \"50Mi\"\r\n          limits:\r\n            cpu: \"100m\"\r\n            memory: \"50Mi\"\r\n        securityContext:\r\n          privileged: false\r\n          capabilities:\r\n            add: [\"NET_ADMIN\", \"NET_RAW\"]\r\n        env:\r\n        - name: POD_NAME\r\n          valueFrom:\r\n            fieldRef:\r\n              fieldPath: metadata.name\r\n        - name: POD_NAMESPACE\r\n          valueFrom:\r\n            fieldRef:\r\n              fieldPath: metadata.namespace\r\n        volumeMounts:\r\n        - name: run\r\n          mountPath: \/run\/flannel\r\n        - name: flannel-cfg\r\n          mountPath: \/etc\/kube-flannel\/\r\n      volumes:\r\n      - name: run\r\n        hostPath:\r\n          path: \/run\/flannel\r\n      - name: cni\r\n        hostPath:\r\n          path: \/etc\/cni\/net.d\r\n      - name: flannel-cfg\r\n        configMap:\r\n          name: kube-flannel-cfg<\/code><\/pre>\n<h3>4\uff09\u5c06\u5de5\u4f5c\u8282\u70b9\u52a0\u5165\u96c6\u7fa4<\/h3>\n<pre><code class=\"language-yaml\">[root@k8s-master-01 ~]#kubeadm token create    --print-join-command\r\n\r\nkubeadm join 192.168.15.31:6443 --token zvyidd.gxnw8v1zdv3pdlzf     --discovery-token-ca-cert-hash sha256:05b6946a4de3f0e6900291118cf25de7bcdce3bcd19aa53eaaa8ffa86d67e440\r\n## \u6ce8\uff1a\u5c06\u4e0a\u65b9\u751f\u6210\u7684token\u590d\u5236\u5230node\u8282\u70b9\u4e0a\u6267\u884c\u3002<\/code><\/pre>\n<h3>5\uff09\u68c0\u67e5\u96c6\u7fa4\u72b6\u6001<\/h3>\n<pre><code class=\"language-yaml\">## \u7b2c\u4e00\u79cd\u65b9\u5f0f\r\n[root@k8s-master-01 ~]# kubectl get nodes\r\nNAME            STATUS   ROLES                  AGE     VERSION\r\nk8s-master-01   Ready    control-plane,master   11m     v1.20.5\r\nk8s-node-01     Ready                     3m13s   v1.20.5\r\nk8s-node-02     Ready                     3m9s    v1.20.5\r\n\r\n# \u7b2c\u4e8c\u79cd\u65b9\u5f0f\r\n[root@k8s-master-01 ~]# kubectl get pods -n kube-system\r\nNAME              
                      READY   STATUS    RESTARTS   AGE\r\ncoredns-f68b4c98f-mmxkc                 1\/1     Running   0          11m\r\ncoredns-f68b4c98f-nvp6b                 1\/1     Running   0          11m\r\netcd-k8s-master-01                      1\/1     Running   0          11m\r\nkube-apiserver-k8s-master-01            1\/1     Running   0          11m\r\nkube-controller-manager-k8s-master-01   1\/1     Running   0          11m\r\nkube-flannel-ds-25kk5                   1\/1     Running   0          4m49s\r\nkube-flannel-ds-9zkkl                   1\/1     Running   0          3m22s\r\nkube-flannel-ds-sx57n                   1\/1     Running   0          3m26s\r\nkube-proxy-2gsrl                        1\/1     Running   0          11m\r\nkube-proxy-jkdbs                        1\/1     Running   0          3m22s\r\nkube-proxy-wqrc2                        1\/1     Running   0          3m26s\r\nkube-scheduler-k8s-master-01            1\/1     Running   0          11m\r\n\r\n# \u7b2c\u4e09\u79cd\u65b9\u5f0f\uff1a\u76f4\u63a5\u9a8c\u8bc1\u96c6\u7fa4DNS\r\n[root@k8s-master-01 ~]# kubectl run test -it --rm --image=busybox:1.28.3\r\nIf you don't see a command prompt, try pressing enter.\r\n\/ # nslookup kubernetes\r\nServer:    10.96.0.10\r\nAddress 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local\r\n\r\nName:      kubernetes\r\nAddress 1: 10.96.0.1 kubernetes.default.svc.cluster.local\r\n<\/code><\/pre>\n<p>\u9644\u4ef6\uff1a<\/p>\n<pre><code class=\"language-yaml\">---\r\napiVersion: policy\/v1beta1\r\nkind: PodSecurityPolicy\r\nmetadata:\r\n  name: psp.flannel.unprivileged\r\n  annotations:\r\n    seccomp.security.alpha.kubernetes.io\/allowedProfileNames: docker\/default\r\n    seccomp.security.alpha.kubernetes.io\/defaultProfileName: docker\/default\r\n    apparmor.security.beta.kubernetes.io\/allowedProfileNames: runtime\/default\r\n    apparmor.security.beta.kubernetes.io\/defaultProfileName: runtime\/default\r\nspec:\r\n  privileged: false\r\n  
volumes:\r\n  - configMap\r\n  - secret\r\n  - emptyDir\r\n  - hostPath\r\n  allowedHostPaths:\r\n  - pathPrefix: \"\/etc\/cni\/net.d\"\r\n  - pathPrefix: \"\/etc\/kube-flannel\"\r\n  - pathPrefix: \"\/run\/flannel\"\r\n  readOnlyRootFilesystem: false\r\n  # Users and groups\r\n  runAsUser:\r\n    rule: RunAsAny\r\n  supplementalGroups:\r\n    rule: RunAsAny\r\n  fsGroup:\r\n    rule: RunAsAny\r\n  # Privilege Escalation\r\n  allowPrivilegeEscalation: false\r\n  defaultAllowPrivilegeEscalation: false\r\n  # Capabilities\r\n  allowedCapabilities: ['NET_ADMIN', 'NET_RAW']\r\n  defaultAddCapabilities: []\r\n  requiredDropCapabilities: []\r\n  # Host namespaces\r\n  hostPID: false\r\n  hostIPC: false\r\n  hostNetwork: true\r\n  hostPorts:\r\n  - min: 0\r\n    max: 65535\r\n  # SELinux\r\n  seLinux:\r\n    # SELinux is unused in CaaSP\r\n    rule: 'RunAsAny'\r\n---\r\nkind: ClusterRole\r\napiVersion: rbac.authorization.k8s.io\/v1\r\nmetadata:\r\n  name: flannel\r\nrules:\r\n- apiGroups: ['extensions']\r\n  resources: ['podsecuritypolicies']\r\n  verbs: ['use']\r\n  resourceNames: ['psp.flannel.unprivileged']\r\n- apiGroups:\r\n  - \"\"\r\n  resources:\r\n  - pods\r\n  verbs:\r\n  - get\r\n- apiGroups:\r\n  - \"\"\r\n  resources:\r\n  - nodes\r\n  verbs:\r\n  - list\r\n  - watch\r\n- apiGroups:\r\n  - \"\"\r\n  resources:\r\n  - nodes\/status\r\n  verbs:\r\n  - patch\r\n---\r\nkind: ClusterRoleBinding\r\napiVersion: rbac.authorization.k8s.io\/v1\r\nmetadata:\r\n  name: flannel\r\nroleRef:\r\n  apiGroup: rbac.authorization.k8s.io\r\n  kind: ClusterRole\r\n  name: flannel\r\nsubjects:\r\n- kind: ServiceAccount\r\n  name: flannel\r\n  namespace: kube-system\r\n---\r\napiVersion: v1\r\nkind: ServiceAccount\r\nmetadata:\r\n  name: flannel\r\n  namespace: kube-system\r\n---\r\nkind: ConfigMap\r\napiVersion: v1\r\nmetadata:\r\n  name: kube-flannel-cfg\r\n  namespace: kube-system\r\n  labels:\r\n    tier: node\r\n    app: flannel\r\ndata:\r\n  cni-conf.json: |\r\n    {\r\n      
\"name\": \"cbr0\",\r\n      \"cniVersion\": \"0.3.1\",\r\n      \"plugins\": [\r\n        {\r\n          \"type\": \"flannel\",\r\n          \"delegate\": {\r\n            \"hairpinMode\": true,\r\n            \"isDefaultGateway\": true\r\n          }\r\n        },\r\n        {\r\n          \"type\": \"portmap\",\r\n          \"capabilities\": {\r\n            \"portMappings\": true\r\n          }\r\n        }\r\n      ]\r\n    }\r\n  net-conf.json: |\r\n    {\r\n      \"Network\": \"10.244.0.0\/16\",\r\n      \"Backend\": {\r\n        \"Type\": \"vxlan\"\r\n      }\r\n    }\r\n---\r\napiVersion: apps\/v1\r\nkind: DaemonSet\r\nmetadata:\r\n  name: kube-flannel-ds\r\n  namespace: kube-system\r\n  labels:\r\n    tier: node\r\n    app: flannel\r\nspec:\r\n  selector:\r\n    matchLabels:\r\n      app: flannel\r\n  template:\r\n    metadata:\r\n      labels:\r\n        tier: node\r\n        app: flannel\r\n    spec:\r\n      affinity:\r\n        nodeAffinity:\r\n          requiredDuringSchedulingIgnoredDuringExecution:\r\n            nodeSelectorTerms:\r\n            - matchExpressions:\r\n              - key: kubernetes.io\/os\r\n                operator: In\r\n                values:\r\n                - linux\r\n      hostNetwork: true\r\n      priorityClassName: system-node-critical\r\n      tolerations:\r\n      - operator: Exists\r\n        effect: NoSchedule\r\n      serviceAccountName: flannel\r\n      initContainers:\r\n      - name: install-cni\r\n        image: registry.cn-hangzhou.aliyuncs.com\/alvinos\/flanned:v0.13.1-rc1\r\n        command:\r\n        - cp\r\n        args:\r\n        - -f\r\n        - \/etc\/kube-flannel\/cni-conf.json\r\n        - \/etc\/cni\/net.d\/10-flannel.conflist\r\n        volumeMounts:\r\n        - name: cni\r\n          mountPath: \/etc\/cni\/net.d\r\n        - name: flannel-cfg\r\n          mountPath: \/etc\/kube-flannel\/\r\n      containers:\r\n      - name: kube-flannel\r\n        image: 
registry.cn-hangzhou.aliyuncs.com\/alvinos\/flanned:v0.13.1-rc1\r\n        command:\r\n        - \/opt\/bin\/flanneld\r\n        args:\r\n        - --ip-masq\r\n        - --kube-subnet-mgr\r\n        resources:\r\n          requests:\r\n            cpu: \"100m\"\r\n            memory: \"50Mi\"\r\n          limits:\r\n            cpu: \"100m\"\r\n            memory: \"50Mi\"\r\n        securityContext:\r\n          privileged: false\r\n          capabilities:\r\n            add: [\"NET_ADMIN\", \"NET_RAW\"]\r\n        env:\r\n        - name: POD_NAME\r\n          valueFrom:\r\n            fieldRef:\r\n              fieldPath: metadata.name\r\n        - name: POD_NAMESPACE\r\n          valueFrom:\r\n            fieldRef:\r\n              fieldPath: metadata.namespace\r\n        volumeMounts:\r\n        - name: run\r\n          mountPath: \/run\/flannel\r\n        - name: flannel-cfg\r\n          mountPath: \/etc\/kube-flannel\/\r\n      volumes:\r\n      - name: run\r\n        hostPath:\r\n          path: \/run\/flannel\r\n      - name: cni\r\n        hostPath:\r\n          path: \/etc\/cni\/net.d\r\n      - name: flannel-cfg\r\n        configMap:\r\n          name: kube-flannel-cfg\r\n<\/code><\/pre>\n","protected":false},"excerpt":{"rendered":"<p>kubeadm\u5b89\u88c5k8s \u4e00\u3001\u7b80\u4ecb Kubernetes\u6709\u4e24\u79cd\u65b9\u5f0f\uff0c\u7b2c\u4e00\u79cd\u662f\u4e8c\u8fdb\u5236\u7684\u65b9\u5f0f\uff0c\u53ef\u5b9a\u5236\u4f46\u662f\u90e8\u7f72\u590d\u6742\u5bb9 
[&hellip;]<\/p>\n","protected":false},"author":3,"featured_media":6635,"comment_status":"closed","ping_status":"closed","sticky":false,"template":"","format":"standard","meta":[],"categories":[417,412],"tags":[],"_links":{"self":[{"href":"https:\/\/egonlin.com\/index.php?rest_route=\/wp\/v2\/posts\/6618"}],"collection":[{"href":"https:\/\/egonlin.com\/index.php?rest_route=\/wp\/v2\/posts"}],"about":[{"href":"https:\/\/egonlin.com\/index.php?rest_route=\/wp\/v2\/types\/post"}],"author":[{"embeddable":true,"href":"https:\/\/egonlin.com\/index.php?rest_route=\/wp\/v2\/users\/3"}],"replies":[{"embeddable":true,"href":"https:\/\/egonlin.com\/index.php?rest_route=%2Fwp%2Fv2%2Fcomments&post=6618"}],"version-history":[{"count":5,"href":"https:\/\/egonlin.com\/index.php?rest_route=\/wp\/v2\/posts\/6618\/revisions"}],"predecessor-version":[{"id":10774,"href":"https:\/\/egonlin.com\/index.php?rest_route=\/wp\/v2\/posts\/6618\/revisions\/10774"}],"wp:featuredmedia":[{"embeddable":true,"href":"https:\/\/egonlin.com\/index.php?rest_route=\/wp\/v2\/media\/6635"}],"wp:attachment":[{"href":"https:\/\/egonlin.com\/index.php?rest_route=%2Fwp%2Fv2%2Fmedia&parent=6618"}],"wp:term":[{"taxonomy":"category","embeddable":true,"href":"https:\/\/egonlin.com\/index.php?rest_route=%2Fwp%2Fv2%2Fcategories&post=6618"},{"taxonomy":"post_tag","embeddable":true,"href":"https:\/\/egonlin.com\/index.php?rest_route=%2Fwp%2Fv2%2Ftags&post=6618"}],"curies":[{"name":"wp","href":"https:\/\/api.w.org\/{rel}","templated":true}]}}