
Deploying a Kubernetes Cluster from Binaries (Step by Step)

Source: https://blog.51cto.com/u_13994871/5009424

Environment:

k8s-master    192.168.1.104    CentOS 7.3, kernel 3.10    2 CPUs, 4 GB RAM, 30 GB disk

k8s-node01    192.168.1.107    CentOS 7.3, kernel 3.10    2 CPUs, 4 GB RAM, 30 GB disk

k8s-node02    192.168.1.108    CentOS 7.3, kernel 3.10    2 CPUs, 4 GB RAM, 30 GB disk

Prepare the operating system (k8s-master, k8s-node01, k8s-node02)

  1. Disable SELinux

sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config

  2. Disable the swap partition

Comment out the swap entry in /etc/fstab.
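One way to do this, also turning swap off for the running session (these two commands are an addition, not from the original):

swapoff -a

sed -ri 's/.*swap.*/#&/' /etc/fstab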

  3. Make sure the three servers' clocks agree

yum -y install ntpdate && ntpdate cn.ntp.org.cn
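ntpdate is a one-shot sync; optionally, a cron entry keeps the clocks aligned afterwards (an addition, not from the original):

echo '*/30 * * * * /usr/sbin/ntpdate cn.ntp.org.cn' >> /var/spool/cron/root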

  4. Adjust kernel parameters

cat > /etc/sysctl.d/k8s.conf << EOF
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
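The two bridge-nf-call keys only exist once the br_netfilter module is loaded, so load it before applying the settings (this line is an addition, not from the original):

modprobe br_netfilter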

sysctl --system

  5. Load the IPVS modules

modprobe -- ip_vs

modprobe -- ip_vs_rr

modprobe -- ip_vs_wrr

modprobe -- ip_vs_sh

modprobe -- nf_conntrack_ipv4

lsmod | grep ip_vs

lsmod | grep nf_conntrack_ipv4

yum install -y ipvsadm
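The modprobe calls above do not survive a reboot. A common way to make them persistent on CentOS 7 (an addition, not from the original) is a script under /etc/sysconfig/modules:

cat > /etc/sysconfig/modules/ipvs.modules << EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF

chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules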

  6. Make sure all three servers can reach the Internet.
  7. Update /etc/hosts

cat >> /etc/hosts << EOF
192.168.1.104 k8s-master
192.168.1.107 k8s-node01
192.168.1.108 k8s-node02
EOF

  8. Update the systemd package

yum -y update systemd

  9. Stop and disable the firewall
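The original gives no command for this step; on CentOS 7 it is typically:

systemctl stop firewalld && systemctl disable firewalld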
  10. Install Docker

# Remove old Docker versions

yum remove docker \

                 docker-client \

                 docker-client-latest \

                 docker-common \

                 docker-latest \

                 docker-latest-logrotate \

                 docker-logrotate \

                 docker-engine

# Add the Docker yum repo

yum install -y yum-utils

yum-config-manager \

   --add-repo \

     https://download.docker.com/linux/centos/docker-ce.repo

# Install Docker

yum install -y docker-ce

# Start Docker and enable it at boot

systemctl start docker && systemctl enable docker

# Verify the installation

docker info

# Configure a registry mirror and switch Docker's cgroup driver to systemd
# (this must match the kubelet's cgroupDriver configured later)

cat > /etc/docker/daemon.json << EOF
{
  "registry-mirrors": ["https://m81hcukn.mirror.aliyuncs.com"],
  "exec-opts": ["native.cgroupdriver=systemd"]
}
EOF

# Restart Docker

systemctl restart docker

  11. Set up passwordless SSH (on k8s-master)

[root@k8s-master ~]# ssh-keygen -t rsa

[root@k8s-master ~]# ssh-copy-id root@k8s-node01

[root@k8s-master ~]# ssh-copy-id root@k8s-node02

  12. Reboot all three servers

reboot

  13. Set up the working directory (k8s-master)

# Every machine needs certificates, component config files, and systemd unit files. We generate all of them on k8s-master and then distribute them to the other machines.

mkdir -p /data/work

Deploy the etcd cluster

  1. Create the etcd directories

mkdir -p /etc/etcd        # config files

mkdir -p /etc/etcd/ssl    # certificates

  2. Create the etcd certificates

# Upload the cfssl certificate tools

[root@k8s-master work]# ls /data/work/

cfssl  cfssl-certinfo  cfssljson

# Set up the tools

[root@k8s-master work]# chmod +x cfssl*

[root@k8s-master work]# mv cfssl cfssl-certinfo cfssljson /usr/local/bin/

# Create the CA CSR file

[root@k8s-master work]# vim ca-csr.json  

 "CN": "kubernetes",

 "key": {

     "algo": "rsa",

     "size": 2048

 "names": [

     "C": "CN",

     "ST": "Shanghai",

     "L": "Shanghai",

     "O": "k8s",

     "OU": "system"

 "ca": {

         "expiry": "87600h"

# Generate the CA certificate

[root@k8s-master work]# cfssl gencert -initca ca-csr.json  | cfssljson -bare ca

# Create the CA signing policy

[root@k8s-master work]# vim ca-config.json

 "signing": {

     "default": {

         "expiry": "87600h"

     "profiles": {

         "kubernetes": {

             "usages": [

                 "signing",

                 "key encipherment",

                 "server auth",

                 "client auth"

             "expiry": "87600h"

# Create the etcd CSR file

[root@k8s-master work]# vim etcd-csr.json

 "CN": "etcd",

 "hosts": [

   "127.0.0.1",

   "192.168.1.104",

   "192.168.1.107",

   "192.168.1.108"

 "key": {

   "algo": "rsa",

   "size": 2048

 "names": [{

   "C": "CN",

   "ST": "Shanghai",

   "L": "Shanghai",

   "O": "k8s",

   "OU": "system"

# Generate the etcd certificate

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes etcd-csr.json | cfssljson  -bare etcd
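A quick check that the certificate and key were produced (an added step, mirroring the checks used for the other components below):

[root@k8s-master work]# ls etcd*.pem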

  3. Download the etcd release

[root@master1 work]# wget https://github.com/etcd-io/etcd/releases/download/v3.4.13/etcd-v3.4.13-linux-amd64.tar.gz

[root@master1 work]# tar -xf etcd-v3.4.13-linux-amd64.tar.gz  

# Copy the binaries into place

[root@master1 work]# cp -p etcd-v3.4.13-linux-amd64/etcd* /usr/local/bin/

# Copy the binaries to node01 and node02

[root@master1 work]# scp etcd-v3.4.13-linux-amd64/etcd* 192.168.1.107:/usr/local/bin/

[root@master1 work]# scp etcd-v3.4.13-linux-amd64/etcd* 192.168.1.108:/usr/local/bin/

  4. Create the etcd config file

[root@k8s-master work]# vim etcd.conf

#[Member]

ETCD_NAME="etcd1"

ETCD_DATA_DIR="/var/lib/etcd/default.etcd"

ETCD_LISTEN_PEER_URLS="https://192.168.1.104:2380"

ETCD_LISTEN_CLIENT_URLS="https://192.168.1.104:2379,http://127.0.0.1:2379"

#[Clustering]

ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.1.104:2380"

ETCD_ADVERTISE_CLIENT_URLS="https://192.168.1.104:2379"

ETCD_INITIAL_CLUSTER="etcd1=https://192.168.1.104:2380,etcd2=https://192.168.1.107:2380,etcd3=https://192.168.1.108:2380"

ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"

ETCD_INITIAL_CLUSTER_STATE="new"

ETCD_NAME: node name, unique within the cluster
ETCD_DATA_DIR: data directory
ETCD_LISTEN_PEER_URLS: listen address for peer (cluster) traffic
ETCD_LISTEN_CLIENT_URLS: listen address for client traffic
ETCD_INITIAL_ADVERTISE_PEER_URLS: peer address advertised to the cluster
ETCD_ADVERTISE_CLIENT_URLS: client address advertised to the cluster
ETCD_INITIAL_CLUSTER: addresses of all cluster members
ETCD_INITIAL_CLUSTER_TOKEN: cluster token
ETCD_INITIAL_CLUSTER_STATE: "new" when bootstrapping a new cluster, "existing" when joining one that already exists

  5. Create the systemd unit file

[root@k8s-master work]# vim etcd.service

[Unit]

Description=Etcd Server

After=network.target

After=network-online.target

Wants=network-online.target

[Service]

Type=notify

EnvironmentFile=-/etc/etcd/etcd.conf

WorkingDirectory=/var/lib/etcd/

ExecStart=/usr/local/bin/etcd \

 --cert-file=/etc/etcd/ssl/etcd.pem \

 --key-file=/etc/etcd/ssl/etcd-key.pem \

 --trusted-ca-file=/etc/etcd/ssl/ca.pem \

 --peer-cert-file=/etc/etcd/ssl/etcd.pem \

 --peer-key-file=/etc/etcd/ssl/etcd-key.pem \

 --peer-trusted-ca-file=/etc/etcd/ssl/ca.pem \

 --peer-client-cert-auth \

 --client-cert-auth

Restart=on-failure

RestartSec=5

LimitNOFILE=65536

[Install]

WantedBy=multi-user.target

  6. Distribute the files to all nodes

[root@k8s-master work]# cp ca*.pem /etc/etcd/ssl/

[root@k8s-master work]# cp etcd*.pem /etc/etcd/ssl/

[root@k8s-master work]# cp etcd.conf /etc/etcd/

[root@k8s-master work]# cp etcd.service /usr/lib/systemd/system/

[root@k8s-master work]# for i in k8s-node01 k8s-node02;do scp -r /etc/etcd/ $i:/etc/;done

[root@k8s-master work]# for i in k8s-node01 k8s-node02;do scp etcd.service $i:/usr/lib/systemd/system/;done

  7. Create the data directory

mkdir -p /var/lib/etcd/default.etcd

  8. On node01 and node02, change the etcd name and IP addresses in etcd.conf (sample below) and create /var/lib/etcd/default.etcd on each.
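For example, on k8s-node01 the changed lines would be (a sketch inferred from the cluster layout above, not from the original):

ETCD_NAME="etcd2"
ETCD_LISTEN_PEER_URLS="https://192.168.1.107:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.1.107:2379,http://127.0.0.1:2379"
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.1.107:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.1.107:2379"

k8s-node02 uses "etcd3" and 192.168.1.108 in the same places.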
  9. Start the etcd cluster

Run the same commands on k8s-master, k8s-node01, and k8s-node02. Because the unit uses Type=notify, the first node you start will block until a quorum forms, so start the others promptly.

systemctl daemon-reload

systemctl enable etcd.service

systemctl start etcd.service

systemctl status etcd

  10. Check the cluster's health

[root@k8s-master work]# ETCDCTL_API=3 /usr/local/bin/etcdctl --write-out=table --cacert=/etc/etcd/ssl/ca.pem --cert=/etc/etcd/ssl/etcd.pem --key=/etc/etcd/ssl/etcd-key.pem --endpoints=https://192.168.1.104:2379,https://192.168.1.107:2379,https://192.168.1.108:2379 endpoint health
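The same invocation with endpoint status also shows which member is the leader and the DB sizes (an added check):

[root@k8s-master work]# ETCDCTL_API=3 /usr/local/bin/etcdctl --write-out=table --cacert=/etc/etcd/ssl/ca.pem --cert=/etc/etcd/ssl/etcd.pem --key=/etc/etcd/ssl/etcd-key.pem --endpoints=https://192.168.1.104:2379,https://192.168.1.107:2379,https://192.168.1.108:2379 endpoint status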

Deploy the Kubernetes components

  1. Download the server tarball

[root@k8s-master work]# wget https://dl.k8s.io/v1.20.1/kubernetes-server-linux-amd64.tar.gz

[root@k8s-master work]# tar -xf kubernetes-server-linux-amd64.tar.gz  

[root@k8s-master work]# cd kubernetes/server/bin/

[root@k8s-master bin]# cp kube-apiserver kube-controller-manager kube-scheduler kubectl /usr/local/bin/

[root@k8s-master bin]# for i in k8s-node01 k8s-node02;do scp kubelet kube-proxy $i:/usr/local/bin/;done

  2. Create the working directories

[root@k8s-master work]# mkdir -p /etc/kubernetes/         # component config files

[root@k8s-master work]# mkdir -p /etc/kubernetes/ssl      # component certificates

[root@k8s-master work]# mkdir /var/log/kubernetes         # component logs

Deploy kube-apiserver

Create the CSR file

[root@k8s-master work]# vim kube-apiserver-csr.json

 "CN": "kubernetes",

 "hosts": [

   "127.0.0.1",

   "192.168.1.104",

   "192.168.1.107",

   "192.168.1.108",

   "192.168.1.109",

   "192.168.1.110",

   "192.168.1.111",

   "10.255.0.1",

   "kubernetes",

   "kubernetes.default",

   "kubernetes.default.svc",

   "kubernetes.default.svc.cluster",

   "kubernetes.default.svc.cluster.local"

 "key": {

   "algo": "rsa",

   "size": 2048

 "names": [

     "C": "CN",

     "ST": "Shanghai",

     "L": "Shanghai",

     "O": "k8s",

     "OU": "system"

If the hosts field is non-empty, it must list every IP and domain name authorized to use this certificate.

Because the certificate is used by the Kubernetes masters, include every master IP (plus a few spares for future masters, here .109-.111) as well as the first IP of the service network, i.e. the first address of the --service-cluster-ip-range passed to kube-apiserver (10.255.0.1 here).

Generate the certificate and the token file

[root@k8s-master work]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-apiserver-csr.json | cfssljson -bare kube-apiserver

[root@k8s-master work]# cat > token.csv << EOF
$(head -c 16 /dev/urandom | od -An -t x | tr -d ' '),kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF

Create the config file

[root@k8s-master work]# vim kube-apiserver.conf

KUBE_APISERVER_OPTS="--enable-admission-plugins=NamespaceLifecycle,NodeRestriction,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota \

 --anonymous-auth=false \

 --bind-address=192.168.1.104 \

 --secure-port=6443 \

 --advertise-address=192.168.1.104 \

 --insecure-port=0 \

 --authorization-mode=Node,RBAC \

 --runtime-config=api/all=true \

 --enable-bootstrap-token-auth \

 --service-cluster-ip-range=10.255.0.0/16 \

 --token-auth-file=/etc/kubernetes/token.csv \

 --service-node-port-range=30000-50000 \

 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem  \

 --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem \

 --client-ca-file=/etc/kubernetes/ssl/ca.pem \

 --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem \

 --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem \

 --service-account-key-file=/etc/kubernetes/ssl/ca-key.pem \

--service-account-signing-key-file=/etc/kubernetes/ssl/ca-key.pem  \

 --service-account-issuer=https://kubernetes.default.svc.cluster.local \

 --etcd-cafile=/etc/etcd/ssl/ca.pem \

 --etcd-certfile=/etc/etcd/ssl/etcd.pem \

 --etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \

 --etcd-servers=https://192.168.1.104:2379,https://192.168.1.107:2379,https://192.168.1.108:2379 \

 --enable-swagger-ui=true \

 --allow-privileged=true \

 --apiserver-count=3 \

 --audit-log-maxage=30 \

 --audit-log-maxbackup=3 \

 --audit-log-maxsize=100 \

 --audit-log-path=/var/log/kube-apiserver-audit.log \

 --event-ttl=1h \

 --alsologtostderr=true \

 --logtostderr=false \

 --log-dir=/var/log/kubernetes \

 --v=4"

--logtostderr: log to stderr (disabled here in favor of file logging)
--v: log verbosity
--log-dir: log directory
--etcd-servers: etcd cluster endpoints
--bind-address: listen address
--secure-port: HTTPS port
--advertise-address: address advertised to the cluster
--allow-privileged: allow privileged containers
--service-cluster-ip-range: virtual IP range for Services
--enable-admission-plugins: admission control plugins
--authorization-mode: enable RBAC authorization and Node self-management
--enable-bootstrap-token-auth: enable the TLS bootstrap mechanism
--token-auth-file: bootstrap token file
--service-node-port-range: port range for NodePort Services
--kubelet-client-xxx: client certificate the apiserver uses to reach kubelets
--tls-xxx-file: the apiserver's HTTPS certificates
--etcd-xxxfile: certificates for connecting to etcd
--audit-log-xxx: audit log settings

Create the systemd unit file

[root@k8s-master work]# vim kube-apiserver.service

[Unit]

Description=Kubernetes API Server

Documentation=https://github.com/kubernetes/kubernetes

After=etcd.service

Wants=etcd.service

[Service]

EnvironmentFile=-/etc/kubernetes/kube-apiserver.conf

ExecStart=/usr/local/bin/kube-apiserver $KUBE_APISERVER_OPTS

Restart=on-failure

RestartSec=5

Type=notify

LimitNOFILE=65536

[Install]

WantedBy=multi-user.target

Distribute the files

[root@k8s-master work]# cp ca*.pem /etc/kubernetes/ssl/

[root@k8s-master work]# cp kube-apiserver*.pem /etc/kubernetes/ssl/

[root@k8s-master work]# cp token.csv /etc/kubernetes/

[root@k8s-master work]# cp kube-apiserver.conf /etc/kubernetes/  

[root@k8s-master work]# cp kube-apiserver.service /usr/lib/systemd/system/

# Start the service

[root@k8s-master work]# systemctl daemon-reload

[root@k8s-master work]# systemctl enable kube-apiserver

[root@k8s-master work]# systemctl start kube-apiserver

[root@k8s-master work]# systemctl status kube-apiserver

[root@k8s-master work]# curl --insecure https://192.168.1.104:6443/

Any response (typically a JSON 401 Unauthorized, since anonymous access is disabled) means the apiserver is up.

Deploy kubectl

Create the CSR file

[root@k8s-master work]# vim admin-csr.json

 "CN": "admin",

 "hosts": [],

 "key": {

   "algo": "rsa",

   "size": 2048

 "names": [

     "C": "CN",

     "ST": "Shanghai",

     "L": "Shanghai",

     "O": "system:masters",              

     "OU": "system"

kube-apiserver later uses RBAC to authorize requests from clients such as the kubelet, kube-proxy, and Pods.

kube-apiserver ships with predefined RoleBindings: the cluster-admin ClusterRoleBinding binds the group system:masters to the cluster-admin role, which grants access to every API.

O sets this certificate's group to system:masters. The certificate is signed by our CA, so authentication succeeds, and because the group is the pre-authorized system:masters, the client is authorized to call every API.

This admin certificate is used below to generate the administrator's kubeconfig. In general, use RBAC for access control: Kubernetes takes the certificate's CN field as the User and the O field as the Group.

"O": "system:masters" must be exactly that value, otherwise the later kubectl create clusterrolebinding step is rejected for lack of permission.

[root@k8s-master work]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin

[root@k8s-master work]# cp admin*.pem /etc/kubernetes/ssl/

Create the kubeconfig

A kubeconfig is kubectl's configuration file. It contains everything needed to reach the apiserver: the apiserver address, the CA certificate, and the client's own certificate.

Set the cluster parameters

[root@k8s-master work]# kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://192.168.1.104:6443 --kubeconfig=kube.config

Set the client credentials

[root@k8s-master work]# kubectl config set-credentials admin --client-certificate=admin.pem --client-key=admin-key.pem --embed-certs=true --kubeconfig=kube.config

Set the context

[root@k8s-master work]# kubectl config set-context kubernetes --cluster=kubernetes --user=admin --kubeconfig=kube.config

Switch to the context

[root@k8s-master work]# kubectl config use-context kubernetes --kubeconfig=kube.config

[root@k8s-master work]# mkdir ~/.kube

[root@k8s-master work]# cp kube.config ~/.kube/config

Authorize the kubernetes certificate user to call the kubelet API

[root@k8s-master work]# kubectl create clusterrolebinding kube-apiserver:kubelet-apis --clusterrole=system:kubelet-api-admin --user kubernetes

Check the cluster component status

[root@k8s-master work]# kubectl cluster-info

[root@k8s-master work]# kubectl get componentstatuses

[root@k8s-master work]# kubectl get all --all-namespaces

Deploy kube-controller-manager

Create the CSR file

[root@k8s-master work]# vim kube-controller-manager-csr.json

   "CN": "system:kube-controller-manager",

   "key": {

       "algo": "rsa",

       "size": 2048

   "hosts": [

     "127.0.0.1",

     "192.168.1.104",

     "192.168.1.107",

     "192.168.1.108"

   "names": [

       "C": "CN",

       "ST": "Shanghai",

       "L": "Shanghai",

       "O": "system:kube-controller-manager",

       "OU": "system"

The hosts list contains every kube-controller-manager node IP.

With CN and O both set to system:kube-controller-manager, the built-in ClusterRoleBinding system:kube-controller-manager grants kube-controller-manager the permissions it needs.

# Generate the certificate

[root@k8s-master work]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager

[root@k8s-master work]# ls kube-controller-manager*.pem

# Create the kube-controller-manager kubeconfig

Set the cluster parameters

[root@k8s-master work]# kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://192.168.1.104:6443 --kubeconfig=kube-controller-manager.kubeconfig

Set the client credentials

[root@k8s-master work]# kubectl config set-credentials system:kube-controller-manager --client-certificate=kube-controller-manager.pem --client-key=kube-controller-manager-key.pem --embed-certs=true --kubeconfig=kube-controller-manager.kubeconfig

Set the context

[root@k8s-master work]# kubectl config set-context system:kube-controller-manager --cluster=kubernetes --user=system:kube-controller-manager --kubeconfig=kube-controller-manager.kubeconfig

Switch to the context

[root@k8s-master work]# kubectl config use-context system:kube-controller-manager --kubeconfig=kube-controller-manager.kubeconfig

# Create the config file

[root@k8s-master work]# vim kube-controller-manager.conf

KUBE_CONTROLLER_MANAGER_OPTS="--port=0 \

 --secure-port=10252 \

 --bind-address=127.0.0.1 \

 --kubeconfig=/etc/kubernetes/kube-controller-manager.kubeconfig \

 --service-cluster-ip-range=10.255.0.0/16 \

 --cluster-name=kubernetes \

 --cluster-signing-cert-file=/etc/kubernetes/ssl/ca.pem \

 --cluster-signing-key-file=/etc/kubernetes/ssl/ca-key.pem \

 --allocate-node-cidrs=true \

 --cluster-cidr=10.0.0.0/16 \

 --experimental-cluster-signing-duration=87600h \

 --root-ca-file=/etc/kubernetes/ssl/ca.pem \

 --service-account-private-key-file=/etc/kubernetes/ssl/ca-key.pem \

 --leader-elect=true \

 --feature-gates=RotateKubeletServerCertificate=true \

 --controllers=*,bootstrapsigner,tokencleaner \

 --horizontal-pod-autoscaler-use-rest-clients=true \

 --horizontal-pod-autoscaler-sync-period=10s \

 --tls-cert-file=/etc/kubernetes/ssl/kube-controller-manager.pem \

 --tls-private-key-file=/etc/kubernetes/ssl/kube-controller-manager-key.pem \

 --use-service-account-credentials=true \

 --alsologtostderr=true \

 --logtostderr=false \

 --log-dir=/var/log/kubernetes \

 --v=2"

# Create the systemd unit file

[root@k8s-master work]# vim kube-controller-manager.service

[Unit]

Description=Kubernetes Controller Manager

Documentation=https://github.com/kubernetes/kubernetes

[Service]

EnvironmentFile=-/etc/kubernetes/kube-controller-manager.conf

ExecStart=/usr/local/bin/kube-controller-manager $KUBE_CONTROLLER_MANAGER_OPTS

Restart=on-failure

RestartSec=5

[Install]

WantedBy=multi-user.target

# Distribute the files

[root@k8s-master work]# cp kube-controller-manager*.pem /etc/kubernetes/ssl/

[root@k8s-master work]# cp kube-controller-manager.kubeconfig /etc/kubernetes/

[root@k8s-master work]# cp kube-controller-manager.conf /etc/kubernetes/

[root@k8s-master work]# cp kube-controller-manager.service /usr/lib/systemd/system/

# Start the service

[root@k8s-master work]# systemctl daemon-reload  

[root@k8s-master work]# systemctl enable kube-controller-manager

[root@k8s-master work]# systemctl start kube-controller-manager

[root@k8s-master work]# systemctl status kube-controller-manager

Deploy kube-scheduler

# Create the CSR file

[root@k8s-master work]# vim kube-scheduler-csr.json

   "CN": "system:kube-scheduler",

   "hosts": [

     "127.0.0.1",

     "192.168.1.104",

     "192.168.1.107",

     "192.168.1.108"

   "key": {

       "algo": "rsa",

       "size": 2048

   "names": [

       "C": "CN",

       "ST": "Shanghai",

       "L": "Shanghai",

       "O": "system:kube-scheduler",

       "OU": "system"

The hosts list contains every kube-scheduler node IP.

With CN and O both set to system:kube-scheduler, the built-in ClusterRoleBinding system:kube-scheduler grants kube-scheduler the permissions it needs.

# Generate the certificate

[root@k8s-master work]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-scheduler-csr.json | cfssljson -bare kube-scheduler

[root@k8s-master work]# ls kube-scheduler*.pem

# Create the kube-scheduler kubeconfig

Set the cluster parameters

[root@k8s-master work]# kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://192.168.1.104:6443 --kubeconfig=kube-scheduler.kubeconfig

Set the client credentials

[root@k8s-master work]# kubectl config set-credentials system:kube-scheduler --client-certificate=kube-scheduler.pem --client-key=kube-scheduler-key.pem --embed-certs=true --kubeconfig=kube-scheduler.kubeconfig

Set the context

[root@k8s-master work]# kubectl config set-context system:kube-scheduler --cluster=kubernetes --user=system:kube-scheduler --kubeconfig=kube-scheduler.kubeconfig

Switch to the context

[root@k8s-master work]# kubectl config use-context system:kube-scheduler --kubeconfig=kube-scheduler.kubeconfig

# Create the config file

[root@k8s-master work]# vim kube-scheduler.conf

KUBE_SCHEDULER_OPTS="--address=127.0.0.1 \

--kubeconfig=/etc/kubernetes/kube-scheduler.kubeconfig \

--leader-elect=true \

--alsologtostderr=true \

--logtostderr=false \

--log-dir=/var/log/kubernetes \

--v=2"

# Create the systemd unit file

[root@k8s-master work]# vim kube-scheduler.service

[Unit]

Description=Kubernetes Scheduler

Documentation=https://github.com/kubernetes/kubernetes

[Service]

EnvironmentFile=-/etc/kubernetes/kube-scheduler.conf

ExecStart=/usr/local/bin/kube-scheduler $KUBE_SCHEDULER_OPTS

Restart=on-failure

RestartSec=5

[Install]

WantedBy=multi-user.target

# Distribute the files

[root@k8s-master work]# cp kube-scheduler*.pem /etc/kubernetes/ssl/

[root@k8s-master work]# cp kube-scheduler.kubeconfig /etc/kubernetes/

[root@k8s-master work]# cp kube-scheduler.conf /etc/kubernetes/

[root@k8s-master work]# cp kube-scheduler.service /usr/lib/systemd/system/

# Start the service

[root@k8s-master work]# systemctl daemon-reload

[root@k8s-master work]# systemctl enable kube-scheduler

[root@k8s-master work]# systemctl start kube-scheduler

[root@k8s-master work]# systemctl status kube-scheduler

Pull the dependency images (on all three nodes)

[root ~]# docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.2

[root ~]# docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.2 k8s.gcr.io/pause:3.2

[root ~]# docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.2

[root ~]# docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:1.7.0

[root ~]# docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:1.7.0 k8s.gcr.io/coredns:1.7.0

[root ~]# docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:1.7.0

Deploy the kubelet

# Create kubelet-bootstrap.kubeconfig

[root@k8s-master work]# BOOTSTRAP_TOKEN=$(awk -F "," '{print $1}' /etc/kubernetes/token.csv)

Set the cluster parameters

[root@k8s-master work]# kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://192.168.1.104:6443 --kubeconfig=kubelet-bootstrap.kubeconfig

Set the client credentials

[root@k8s-master work]# kubectl config set-credentials kubelet-bootstrap --token=${BOOTSTRAP_TOKEN} --kubeconfig=kubelet-bootstrap.kubeconfig

Set the context

[root@k8s-master work]# kubectl config set-context default --cluster=kubernetes --user=kubelet-bootstrap --kubeconfig=kubelet-bootstrap.kubeconfig

Switch to the context

[root@k8s-master work]# kubectl config use-context default --kubeconfig=kubelet-bootstrap.kubeconfig

Create the role binding

[root@k8s-master work]# kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap

Create the kubelet config file

[root@k8s-master work]#  vim kubelet.json

 "kind": "KubeletConfiguration",

 "apiVersion": "kubelet.config.k8s.io/v1beta1",

 "authentication": {

   "x509": {

     "clientCAFile": "/etc/kubernetes/ssl/ca.pem"

   "webhook": {

     "enabled": true,

     "cacheTTL": "2m0s"

   "anonymous": {

     "enabled": false

 "authorization": {

   "mode": "Webhook",

   "webhook": {

     "cacheAuthorizedTTL": "5m0s",

     "cacheUnauthorizedTTL": "30s"

 "address": "192.168.1.104",

 "port": 10250,

 "readOnlyPort": 10255,

 "cgroupDriver": "systemd",                  # 如果docker的驱动为systemd,处修改为systemd。此处设置很重要,否则后面node节点无法加入到集群

 "hairpinMode": "promiscuous-bridge",

 "serializeImagePulls": false,

 "featureGates": {

   "RotateKubeletClientCertificate": true,

   "RotateKubeletServerCertificate": true

 "clusterDomain": "cluster.local.",

 "clusterDNS": ["10.255.0.2"]

Create the systemd unit file

[root@k8s-master work]# vim kubelet.service

[Unit]

Description=Kubernetes Kubelet

Documentation=https://github.com/kubernetes/kubernetes

After=docker.service

Requires=docker.service

[Service]

WorkingDirectory=/var/lib/kubelet

ExecStart=/usr/local/bin/kubelet \

 --bootstrap-kubeconfig=/etc/kubernetes/kubelet-bootstrap.kubeconfig \

 --cert-dir=/etc/kubernetes/ssl \

 --kubeconfig=/etc/kubernetes/kubelet.kubeconfig \

 --config=/etc/kubernetes/kubelet.json \

 --network-plugin=cni \

 --pod-infra-container-image=k8s.gcr.io/pause:3.2 \

 --alsologtostderr=true \

 --logtostderr=false \

 --log-dir=/var/log/kubernetes \

 --v=2

Restart=on-failure

RestartSec=5

[Install]

WantedBy=multi-user.target

--hostname-override: display name, unique within the cluster (not set here, so the hostname is used)
--network-plugin: enable CNI
--kubeconfig: empty at first; generated automatically after bootstrap and used to talk to the apiserver
--bootstrap-kubeconfig: used on first start to request a certificate from the apiserver
--config: configuration file
--cert-dir: directory where kubelet certificates are generated
--pod-infra-container-image: image for the Pod infrastructure (pause) container

Distribute the files

[root@k8s-master work]# cp kubelet-bootstrap.kubeconfig /etc/kubernetes/

[root@k8s-master work]# cp kubelet.json /etc/kubernetes/

[root@k8s-master work]# cp kubelet.service /usr/lib/systemd/system/

[root@k8s-master work]# for i in k8s-node01 k8s-node02;do scp kubelet-bootstrap.kubeconfig kubelet.json $i:/etc/kubernetes/;done

[root@k8s-master work]# for i in k8s-node01 k8s-node02;do scp ca.pem $i:/etc/kubernetes/ssl/;done

[root@k8s-master work]# for i in k8s-node01 k8s-node02;do scp kubelet.service $i:/usr/lib/systemd/system/;done

Change the address field in kubelet.json to each node's own IP, as in the example below.
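One way to do that from the master, since the master IP appears only in the address field (a sketch; adjust to your addresses):

ssh k8s-node01 "sed -i 's/192.168.1.104/192.168.1.107/' /etc/kubernetes/kubelet.json"

ssh k8s-node02 "sed -i 's/192.168.1.104/192.168.1.108/' /etc/kubernetes/kubelet.json"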

Start the service

On each node:

[root@node1 ~]# mkdir /var/lib/kubelet

[root@node1 ~]# mkdir /var/log/kubernetes

[root@node1 ~]# systemctl daemon-reload

[root@node1 ~]# systemctl enable kubelet

[root@node1 ~]# systemctl start kubelet

[root@node1 ~]# systemctl status kubelet

Then, back on the master, approve the bootstrap CSRs

kubectl get csr

kubectl certificate approve node-csr-8lqeDx2tBKM2brxNYoz8pvyhLj_Lt2_u0n7_XcNzsOE

kubectl certificate approve node-csr-Ia8DBM1Fjdjp73GeGv4ccZIV2doRn8M5xF1kzhy7H-o
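The CSR names above are from the author's run; to approve whatever is pending in one go (an added convenience):

kubectl get csr -o name | xargs kubectl certificate approve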

kubectl get node

The nodes will report NotReady at this point; that resolves once the Calico network plugin is deployed below.

Deploy kube-proxy

# Create the CSR file

[root@k8s-master work]# vim kube-proxy-csr.json

 "CN": "system:kube-proxy",

 "key": {

   "algo": "rsa",

   "size": 2048

 "names": [

     "C": "CN",

     "ST": "Shanghai",

     "L": "Shanghai",

     "O": "k8s",

     "OU": "system"

# Generate the certificate

[root@k8s-master work]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy

[root@k8s-master work]# ls kube-proxy*.pem

# Create the kubeconfig

[root@k8s-master work]# kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://192.168.1.104:6443 --kubeconfig=kube-proxy.kubeconfig

[root@k8s-master work]# kubectl config set-credentials kube-proxy --client-certificate=kube-proxy.pem --client-key=kube-proxy-key.pem --embed-certs=true --kubeconfig=kube-proxy.kubeconfig

[root@k8s-master work]# kubectl config set-context default --cluster=kubernetes --user=kube-proxy --kubeconfig=kube-proxy.kubeconfig

[root@k8s-master work]# kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig

# Create the kube-proxy config file

[root@k8s-master work]# vim kube-proxy.yaml

apiVersion: kubeproxy.config.k8s.io/v1alpha1

bindAddress: 192.168.1.104

clientConnection:

 kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig

clusterCIDR: 192.168.0.0/16                           # must match the network plugin's pod CIDR, or the network plugin will fail to deploy

healthzBindAddress: 192.168.1.104:10256

kind: KubeProxyConfiguration

metricsBindAddress: 192.168.1.104:10249

mode: "ipvs"

# Create the systemd unit file

[root@k8s-master work]# vim kube-proxy.service

[Unit]

Description=Kubernetes Kube-Proxy Server

Documentation=https://github.com/kubernetes/kubernetes

After=network.target

[Service]

WorkingDirectory=/var/lib/kube-proxy

ExecStart=/usr/local/bin/kube-proxy \

 --config=/etc/kubernetes/kube-proxy.yaml \

 --alsologtostderr=true \

 --logtostderr=false \

 --log-dir=/var/log/kubernetes \

 --v=2

Restart=on-failure

RestartSec=5

LimitNOFILE=65536

[Install]

WantedBy=multi-user.target

# Distribute the files

[root@k8s-master work]# cp kube-proxy*.pem /etc/kubernetes/ssl/

[root@k8s-master work]# cp kube-proxy.kubeconfig kube-proxy.yaml /etc/kubernetes/

[root@k8s-master work]# cp kube-proxy.service /usr/lib/systemd/system/

If the master is not going to run kube-proxy, the three cp commands above can be skipped on the master.

[root@k8s-master work]# for i in k8s-node01 k8s-node02;do scp kube-proxy.kubeconfig kube-proxy.yaml $i:/etc/kubernetes/;done

[root@k8s-master work]# for i in k8s-node01 k8s-node02;do scp kube-proxy.service $i:/usr/lib/systemd/system/;done

Note: change the addresses in kube-proxy.yaml (bindAddress, healthzBindAddress, metricsBindAddress) to each node's own IP, for example:
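As with kubelet.json, this can be done from the master (a sketch; the master IP appears in all three address fields):

ssh k8s-node01 "sed -i 's/192.168.1.104/192.168.1.107/g' /etc/kubernetes/kube-proxy.yaml"

ssh k8s-node02 "sed -i 's/192.168.1.104/192.168.1.108/g' /etc/kubernetes/kube-proxy.yaml"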

# Start the service

[root@node1 ~]# mkdir -p /var/lib/kube-proxy

[root@node1 ~]# systemctl daemon-reload

[root@node1 ~]# systemctl enable kube-proxy

[root@node1 ~]# systemctl restart kube-proxy

[root@node1 ~]# systemctl status kube-proxy

Deploy the network plugin (Calico)

[root@k8s-master work]# wget https://docs.projectcalico.org/v3.14/manifests/calico.yaml

[root@k8s-master work]# kubectl apply -f calico.yaml  

With Calico deployed, all nodes now report Ready:

kubectl get nodes  

kubectl get pods -A

Deploy CoreDNS

# Download the YAML template and adapt it

https://raw.githubusercontent.com/coredns/deployment/master/kubernetes/coredns.yaml.sed

Set the Service clusterIP to 10.255.0.2 (this must equal the clusterDNS value in the kubelet config). The adapted file:

[root@master1 work]# cat coredns.yaml  

apiVersion: v1

kind: ServiceAccount

metadata:

 name: coredns

 namespace: kube-system

---
apiVersion: rbac.authorization.k8s.io/v1

kind: ClusterRole

metadata:

 labels:

   kubernetes.io/bootstrapping: rbac-defaults

 name: system:coredns

rules:
- apiGroups:
  - ""
  resources:
  - endpoints
  - services
  - pods
  - namespaces
  verbs:
  - list
  - watch

---
apiVersion: rbac.authorization.k8s.io/v1

kind: ClusterRoleBinding

metadata:

 annotations:

   rbac.authorization.kubernetes.io/autoupdate: "true"

 labels:

   kubernetes.io/bootstrapping: rbac-defaults

 name: system:coredns

roleRef:

 apiGroup: rbac.authorization.k8s.io

 kind: ClusterRole

 name: system:coredns

subjects:

- kind: ServiceAccount

 name: coredns

 namespace: kube-system

---
apiVersion: v1

kind: ConfigMap

metadata:

 name: coredns

 namespace: kube-system

data:

 Corefile: |

    .:53 {
        errors
        health {
          lameduck 5s
        }
        ready
        kubernetes cluster.local in-addr.arpa ip6.arpa {
          fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        forward . /etc/resolv.conf {
          max_concurrent 1000
        }
        cache 30
        reload
        loadbalance
    }

---
apiVersion: apps/v1

kind: Deployment

metadata:

 name: coredns

 namespace: kube-system

 labels:

   k8s-app: kube-dns

   kubernetes.io/name: "CoreDNS"

spec:

 # replicas: not specified here:

 # 1. Default is 1.

 # 2. Will be tuned in real time if DNS horizontal auto-scaling is turned on.

 strategy:

   type: RollingUpdate

   rollingUpdate:

     maxUnavailable: 1

 selector:

   matchLabels:

     k8s-app: kube-dns

 template:

   metadata:

     labels:

       k8s-app: kube-dns

   spec:

     priorityClassName: system-cluster-critical

     serviceAccountName: coredns

     tolerations:

       - key: "CriticalAddonsOnly"

         operator: "Exists"

     nodeSelector:

       kubernetes.io/os: linux

     affinity:

        podAntiAffinity:

          preferredDuringSchedulingIgnoredDuringExecution:

          - weight: 100

            podAffinityTerm:

              labelSelector:

                matchExpressions:

                  - key: k8s-app

                    operator: In

                    values: ["kube-dns"]

              topologyKey: kubernetes.io/hostname

     containers:

     - name: coredns

       image: coredns/coredns:1.8.0

       imagePullPolicy: IfNotPresent

       resources:

         limits:

           memory: 170Mi

         requests:

           cpu: 100m

           memory: 70Mi

       args: [ "-conf", "/etc/coredns/Corefile" ]

       volumeMounts:

       - name: config-volume

         mountPath: /etc/coredns

         readOnly: true

       ports:

       - containerPort: 53

         name: dns

         protocol: UDP

       - containerPort: 53

         name: dns-tcp

         protocol: TCP

       - containerPort: 9153

         name: metrics

         protocol: TCP

       securityContext:

         allowPrivilegeEscalation: false

          capabilities:
            add:
            - NET_BIND_SERVICE
            drop:
            - all
          readOnlyRootFilesystem: true

       livenessProbe:

         httpGet:

           path: /health

           port: 8080

           scheme: HTTP

         initialDelaySeconds: 60

         timeoutSeconds: 5

         successThreshold: 1

         failureThreshold: 5

       readinessProbe:

         httpGet:

           path: /ready

           port: 8181

           scheme: HTTP

     dnsPolicy: Default

     volumes:

       - name: config-volume

         configMap:

           name: coredns

           items:

           - key: Corefile

             path: Corefile

---
apiVersion: v1

kind: Service

metadata:

 name: kube-dns

 namespace: kube-system

 annotations:

   prometheus.io/port: "9153"

   prometheus.io/scrape: "true"

 labels:

   k8s-app: kube-dns

   kubernetes.io/cluster-service: "true"

   kubernetes.io/name: "CoreDNS"

spec:

 selector:

   k8s-app: kube-dns

 clusterIP: 10.255.0.2

 ports:

 - name: dns

   port: 53

   protocol: UDP

 - name: dns-tcp

   port: 53

   protocol: TCP

 - name: metrics

   port: 9153

   protocol: TCP

[root@master1 work]# kubectl apply -f coredns.yaml 

# Deploy a test nginx

[root@k8s-master ~]# vim nginx.yaml  

apiVersion: v1

kind: ReplicationController

metadata:

 name: nginx-controller

spec:

 replicas: 2

 selector:

   name: nginx

 template:

   metadata:

     labels:

       name: nginx

   spec:

     containers:

       - name: nginx

         image: nginx:1.19.6

         ports:

           - containerPort: 80

---
apiVersion: v1

kind: Service

metadata:

 name: nginx-service-nodeport

spec:

 ports:

   - port: 80

     targetPort: 80

     nodePort: 30001

     protocol: TCP

 type: NodePort

 selector:

   name: nginx

[root@master1 ~]# kubectl apply -f nginx.yaml

[root@master1 ~]# kubectl get svc

[root@master1 ~]# kubectl get pods

# Access nginx

 http://192.168.1.107:30001/

# Test DNS

[root@k8s-master ~]# kubectl exec -it nginx-controller-5rrjs -- /bin/bash

root@nginx-controller-5rrjs:/# apt-get update

root@nginx-controller-5rrjs:/# apt-get install inetutils-ping

root@nginx-controller-5rrjs:/# ping nginx-service-nodeport
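Resolution can also be checked directly with nslookup, assuming the image can install dnsutils (an added test):

root@nginx-controller-5rrjs:/# apt-get install -y dnsutils

root@nginx-controller-5rrjs:/# nslookup nginx-service-nodeport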


