编辑/etc/kubernetes/apiserver文件

KUBE_ADMISSION_CONTROL="--admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ResourceQuota"

#查看是否启动

  1. Node 节点

集群示意图

11:KUBELET_HOSTNAME="--hostname-override=192.168.56.140"

 

KUBE_LOGTOSTDERR="--logtostderr=true"

total 12

  1. 重新安装组件
    [root@CNT7XDCK02 ~]# yum -y install flannel
    [root@CNT7XDCK02 ~]# yum -y install kubernetes-node

KUBE_ETCD_SERVERS="--etcd-servers="

-rw-r--r-- 1 root root 655 Jul  3 23:33 config             #需要配置

修改/etc/kubernetes/config文件

KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=registry.access.redhat.com/rhel7/pod-infrastructure:latest"

1.3.2  安装docker及iptables

ca88会员登录入口 1

3.编辑/etc/kubernetes/apiserver文件

kube-controller-manager.service             disabled     #需要启动

root>> yum list installed | grep kube

[root@cmmaster ~]# etcdctl mk /atomic.io/network/config '{"Network":"172.17.0.0/16"}'

[root@k8s-master ~]# systemctl enable kube-apiserver.service kube-controller-manager.service  kube-scheduler.service

 

部署1台Kubernetes
Master节点和3台Minion节点,

2.4 slave配置

5.1 Master 节点

#
yum -y install epel-release

启动并加入开机自启动

三、进程方式检查

KUBELET_HOSTNAME="--hostname-override=192.168.137.148"   #修改成对应Node的IP

kube-proxy.service                          disabled

  1. 卸载之前组件

KUBELET_API_SERVER="--api-servers="   #指定Master节点的API Server

2.3 slave 安装 kubernetes-node

  1. Node 节点:

[root@cmmaster
~]# kubectl get node

11:KUBE_API_PORT="--port=8080"

如下,可以看到master拥有四个node节点机器,状态是Ready正常的。ca88会员登录入口 2

NAME 
            STATUS    AGE

yum install docker iptables-services.x86_64 -y

  1. Node 节点:

#
systemctl status  kube-scheduler.service

[root@k8s-master etcd]# vim etcd.conf 

  1. Node 节点:

安装配置Kubernetes
Node

-rw-r--r-- 1 root root 189 Jul  3 23:33 controller-manager #需要配置

 

#
systemctl status  kube-apiserver.service

MASTER

# Flanneld configuration options  

# etcd url location.  Point this to the server where etcd runs
FLANNEL_ETCD_ENDPOINTS="http://192.168.3.96:2379"

# etcd config key.  This is the configuration key that flannel queries
# For address range assignment
FLANNEL_ETCD_PREFIX="/atomic.io/network"

# Any additional options that you want to pass
#FLANNEL_OPTIONS=""

192.168.137.142
cmmaster

[root@k8s-node1 ~]# cd /etc/kubernetes/

  1. 重新安装组件
    [root@CNT7XDCK01 ~]# yum -y install etcd
    [root@CNT7XDCK01 ~]# yum -y install kubernetes-master

  -
etcd 一个高可用的K/V键值对存储和服务发现系统

active

5.2 Node 节点

FLANNEL_ETCD_KEY="/atomic.io/network"

-rw-r--r-- 1 root root 103 Jul  3 23:33 proxy

ca88会员登录入口 3

KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16"

-rw-r--r-- 1 root root 111 Jul  3 23:33 scheduler          #需要配置

  1. Master 节点:

KUBE_LOG_LEVEL="--v=0"

[root@k8s-node1 kubernetes]# vim config 

[root@CNT7XDCK01 ~]# kubectl get nodes
NAME            STATUS    AGE
192.168.3.100   Ready     35d
192.168.3.97    Ready     35d
192.168.3.98    Ready     35d
192.168.3.99    Ready     35d

#
systemctl status  kube-controller-manager.service

5:KUBELET_ADDRESS="--address=0.0.0.0"

 二、服务方式检查

KUBE_API_ARGS=""

kube-proxy.service                          disabled

 

  -
kube-controller-manager 确保集群服务

2.1   master安装kubernetes etcd

  1. Master 节点:

  -
kube-proxy 提供网络代理服务

#config 

编辑/etc/etcd/etcd.conf文件

2.为flannel网络指定etcd服务,修改/etc/sysconfig/flanneld文件

[root@k8s-node1 ~]# systemctl enable kube-proxy.service kubelet.service 

###
# kubernetes system config
#
# The following values are used to configure various aspects of all
# kubernetes services, including
#
#   kube-apiserver.service
#   kube-controller-manager.service
#   kube-scheduler.service
#   kubelet.service
#   kube-proxy.service
# logging to stderr means we get it in the systemd journal
KUBE_LOGTOSTDERR="--logtostderr=true"

# journal message level, 0 is debug
KUBE_LOG_LEVEL="--v=0"

# Should this cluster be allowed to run privileged docker containers
KUBE_ALLOW_PRIV="--allow-privileged=false"

# How the controller-manager, scheduler, and proxy find the apiserver
KUBE_MASTER="--master=http://192.168.3.96:8080"

192.168.137.212
cmnode3

kubelet.service                             disabled    

修改/etc/sysconfig/flanneld文件

•验证集群是否安装成功

[root@k8s-node1 sysconfig]# systemctl is-active flanneld.service

root>> yum list installed | grep flannel

#
yum -y install etcd kubernetes-master

#开机启动

 

192.168.137.148
cmnode1

[root@k8s-master ~]# systemctl list-unit-files |grep kube

root>> systemctl status etcd
root>> systemctl status kube-apiserver
root>> systemctl status kube-controller-manager
root>> systemctl status kube-scheduler

  Kubernetes工作模式server-client,Kubenetes
Master提供集中化管理Minions。

[root@k8s-master ~]# yum install kubernetes etcd -y

###
# kubernetes kubelet (minion) config

# The address for the info server to serve on (set to 0.0.0.0 or "" for all interfaces)
KUBELET_ADDRESS="--address=0.0.0.0"

# The port for the info server to serve on
KUBELET_PORT="--port=10250"

# You may leave this blank to use the actual hostname
KUBELET_HOSTNAME="--hostname-override=192.168.3.97" # 这里是node机器的IP

# location of the api-server
KUBELET_API_SERVER="--api-servers=http://192.168.3.96:8080" # 这里是master机器的IP

# pod infrastructure container
KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=registry.access.redhat.com/rhel7/pod-infrastructure:latest"

# Add your own!
KUBELET_ARGS=""

4.启动etcd、kube-apiserver、kube-controller-manager、kube-scheduler等服务,并设置开机启动。

[root@k8s-master ~]# etcdctl set /atomic.io/network/config '{ "Network": "172.17.0.0/16" }'

====================================================================

4.按照如下内容修改对应node的配置文件/etc/kubernetes/kubelet

active

四、安装包方式检查

-Master节点

NAME             STATUS    AGE

[root@CNT7XDCK01 ~]# yum list installed | grep kube  #首先查询组件
kubernetes-client.x86_64                1.5.2-0.7.git269f928.el7      
@extras  
kubernetes-master.x86_64                1.5.2-0.7.git269f928.el7      
@extras  
[root@CNT7XDCK01 ~]# yum remove -y kubernetes-client.x86_64
[root@CNT7XDCK01 ~]# yum remove -y kubernetes-master.x86_64

192.168.137.147 
 Ready     7m

[root@k8s-node1 ~]# systemctl list-unit-files |grep kube

root>> yum list installed | grep kube

KUBELET_PORT="--kubelet-port=10250"

#controller-manager 

root>> yum list installed | grep kube

KUBELET_PORT="--port=10250"

ca88会员登录入口 4

  1. 重新注册/启动/检查:组件的系统服务
    [root@CNT7XDCK01 ~]# systemctl enable etcd
    [root@CNT7XDCK01 ~]# systemctl enable kube-apiserver
    [root@CNT7XDCK01 ~]# systemctl enable kube-controller-manager
    [root@CNT7XDCK01 ~]# systemctl enable kube-scheduler

1.安装flannel
kubernetes-node

 systemctl start docker

  1. 配置相关kube的配置文件

192.168.137.199 
 Ready     7m

启动服务并加入开机自启动

root>> ps -ef | grep flannel

#
systemctl status  etcd.service

Linux localhost.localdomain 3.10.0-514.6.1.el7.x86_64 #1 SMP Wed Jan 18 13:06:36 UTC 2017 x86_64 x86_64 x86_64 GNU/Linux

 

  -
flannel 实现夸主机的容器网络的通信

etcd

 

FLANNEL_ETCD=""

-rw-r--r-- 1 root root 103 Jul  3 23:33 proxy

root>> yum list installed | grep etcd

安装EPEL源,在所有节点上

{ "Network": "172.17.0.0/16" }

 

KUBELET_ADDRESS="--address=0.0.0.0"   #将127.0.0.1修改成0.0.0.0

启动并加入开机自动:

五,附上第一次安装k8s集群失败后,后面重新安装k8s的一些环境重置的命令。

ca88会员登录入口 5

  物理机操作系统采用Centos7.3 64位,细节如下。

 

  -
kube-apiserver 提供kubernetes集群的API调用

[root@k8s-master kubernetes]# vim config 

 

ETCD_LISTEN_CLIENT_URLS=""

# kubelet

[root@CNT7XDCK02 ~]# yum list installed | grep kube
kubernetes-client.x86_64             1.5.2-0.7.git269f928.el7         
@extras  
kubernetes-node.x86_64               1.5.2-0.7.git269f928.el7         
@extras
[root@CNT7XDCK02 ~]# yum remove -y kubernetes-client.x86_64
[root@CNT7XDCK02 ~]# yum remove -y kubernetes-node.x86_64

3.修改/etc/kubernetes/config文件

1.1 物理机操作系统

 

KUBE_ALLOW_PRIV="--allow-privileged=false"

systemctl enable flanneld.service

  1. Master节点:

  -
kube-scheduler 调度容器,分配到Node

# apiserver

  1. 配置相关kube的配置文件

KUBE_API_PORT="--port=8080"

systemctl start flanneld.service

[root@CNT7XDCK01 ~]# systemctl status etcd
[root@CNT7XDCK01 ~]# systemctl status kube-apiserver
[root@CNT7XDCK01 ~]# systemctl status kube-controller-manager
[root@CNT7XDCK01 ~]# systemctl status kube-scheduler

ETCD_NAME=default

  本文准备了三台机器用于部署k8s的运行环境,细节如下:

 

在master上执行如下命令

#查看flannel状态

ca88会员登录入口 6

KUBE_MASTER="--master="

1.3.3 关闭默认firewalld启动iptables并清除默认规则

 

安装配置Kubernetes
Master,在Master节点上

iptables -F

ca88会员登录入口 7

yum
-y install flannel kubernetes-node

1.2 主机信息

六、最后,在Master机器,查看K8s安装结果

KUBE_API_ADDRESS="--insecure-bind-address=0.0.0.0"

Master:

一、组件方式检查

1.使用yum安装etcd和kubernetes-master

systemctl stop firewalld

ca88会员登录入口 8

192.168.137.148 
 Ready     1m

#启动并加入开机自启动

  1. 卸载之前组件

启动etcd、kube-apiserver、kube-controller-manager、kube-scheduler等服务,并设置开机启动。

8:KUBELET_PORT="--port=10250"

 

#
for SERVICES in kube-proxy kubelet docker flanneld;do systemctl restart
$SERVICES;systemctl enable $SERVICES;systemctl status $SERVICES;
done

17:KUBE_ETCD_SERVERS="--etcd-servers="

 

KUBELET_ARGS=""

8:KUBE_API_ADDRESS="--insecure-bind-address=0.0.0.0"

ETCD_NAME="default"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379"
ETCD_ADVERTISE_CLIENT_URLS="http://localhost:2379"

Kubernetes集群组件:

2.2 配置

###
# kubernetes system config
#
# The following values are used to configure the kube-apiserver
#

# The address on the local server to listen to.
KUBE_API_ADDRESS="--insecure-bind-address=0.0.0.0"

# The port on the local server to listen on.
KUBE_API_PORT="--port=8080"

# Port minions listen on
KUBELET_PORT="--kubelet-port=10250"

# Comma separated list of nodes in the etcd cluster
KUBE_ETCD_SERVERS="--etcd-servers=http://127.0.0.1:2379"

# Address range to use for services
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16"

# default admission control policies
# KUBE_ADMISSION_CONTROL="--admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota"
KUBE_ADMISSION_CONTROL="--admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ResourceQuota"

# Add your own!
KUBE_API_ARGS=""

ETCD_ADVERTISE_CLIENT_URLS=""

service iptables save

root>> ps -ef | grep etcd

上述节点正常显示,状态为Ready,则说明集群搭建成功

[root@k8s-master kubernetes]# vim  controller-manager 

 

  -
kubelet 在Node节点上按照配置文件中定义的容器规格启动容器

1.3.1 主机名修改

 

2.编辑/etc/etcd/etcd.conf文件

#修改

 

5.在etcd中定义flannel网络

total 24

 

5.在所有Node节点上启动kube-proxy,kubelet,docker,flanneld等服务,并设置开机启动

4:FLANNEL_ETCD_ENDPOINTS=""

  1. 重新注册/启动/检查:组件的系统服务
    [root@CNT7XDCK02 ~]# systemctl enable flanneld
    [root@CNT7XDCK02 ~]# systemctl enable kube-proxy
    [root@CNT7XDCK02 ~]# systemctl enable kubelet
    [root@CNT7XDCK02 ~]# systemctl enable docker

-Minion节点

 systemctl enable docker

 

Kubernetes集群安装部署

kube-scheduler.service                      disabled     #需要启动 

root>> ps -ef | grep kube

如下操作在cmnode1、cmnode2、cmnode3上执行

一、环境介绍及准备

ca88会员登录入口 9

for
SERVICES in etcd kube-apiserver kube-controller-manager kube-scheduler;
do systemctl restart $SERVICES;systemctl enable $SERVICES;systemctl
status $SERVICES ; done

[root@localhost ~]#  hostnamectl --static set-hostname  k8s-node2

[root@CNT7XDCK02 ~]# systemctl status flanneld
[root@CNT7XDCK02 ~]# systemctl status kube-proxy
[root@CNT7XDCK02 ~]# systemctl status kubelet
[root@CNT7XDCK02 ~]# systemctl status docker

ETCD_DATA_DIR="/var/lib/etcd/default.etcd"

[root@localhost ~]# uname -a

修改/etc/kubernetes/kubelet文件

192.168.137.199
cmnode2

192.168.56.150   Ready     54m

active

  1. Master 节点:

[root@k8s-node1 sysconfig]# vim flanneld 

[root@CNT7XDCK01 ~]# systemctl restart etcd
[root@CNT7XDCK01 ~]# systemctl restart kube-apiserver
[root@CNT7XDCK01 ~]# systemctl restart kube-controller-manager
[root@CNT7XDCK01 ~]# systemctl restart kube-scheduler

active

ca88会员登录入口 10

[root@k8s-node1 ~]# systemctl is-active kube-proxy.service kubelet.service 

root>> systemctl status flanneld
root>> systemctl status kube-proxy
root>> systemctl status kubelet
root>> systemctl status docker

kubelet.service                             disabled

root>> kubectl get cs

注意:启动顺序 etcd --> kubernetes*

ca88会员登录入口 11

[root@localhost ~]#  hostnamectl --static set-hostname  k8s-master

ca88会员登录入口 12

active

[root@CNT7XDCK02 ~]# systemctl restart flanneld
[root@CNT7XDCK02 ~]# systemctl restart kube-proxy
[root@CNT7XDCK02 ~]# systemctl restart kubelet
[root@CNT7XDCK02 ~]# systemctl restart docker

-rw-r--r-- 1 root root 615 Jul  3 23:33 kubelet #需要配置

14:KUBELET_PORT="--kubelet-port=10250"

8:KUBELET_ADDRESSES="--machines=192.168.56.140,192.168.56.150"  #增加配置

[root@k8s-master etcd]# systemctl start etcd

[root@k8s-master ~]# cd /etc/kubernetes/

[root@k8s-node1 ~]# systemctl start kube-proxy.service kubelet.service 

#flannel配置

systemctl disable firewalld

[root@k8s-master kubernetes]# vim apiserver 

Node1:

[root@localhost ~]#  hostnamectl --static set-hostname  k8s-node1

14:KUBELET_API_SERVER="--api-servers="

时间:2017年11月21日 浏览量:308

20:ETCD_ADVERTISE_CLIENT_URLS=""

[root@k8s-master ~]# systemctl start kube-apiserver.service kube-controller-manager.service  kube-scheduler.service  

kubernetes集群配置完成!

注意:此时flannel启动不了,之所以启动不起来是因为etcd里面没有flannel所需要的网络信息,此时我们需要在etcd里面创建flannel所需要的网络信息

-rw-r--r-- 1 root root 655 Jul  3 23:33 config  #需要配置

kube-apiserver.service                      disabled     #需要启动

[root@k8s-master etcd]# systemctl enable etcd

1.3.4 启动docker并加入开机自启动

9:ETCD_LISTEN_CLIENT_URLS=""

-rw-r--r-- 1 root root 615 Jul  3 23:33 kubelet

[root@k8s-master ~]# systemctl is-active  kube-apiserver.service kube-controller-manager.service kube-scheduler.service 

yum install kubernetes-node.x86_64 flannel -y

Centos7.3 Kubernetes集群部署

Node2:

22:KUBE_MASTER="--master="

22:KUBE_MASTER="--master="

#修改

master创建 flannel所需要的网络信息:

systemctl enable iptables

[root@k8s-master kubernetes]# ll

[root@k8s-node1 kubernetes]# cd /etc/sysconfig/

[root@localhost ~]# cat /etc/redhat-release 

#启动

-rw-r--r-- 1 root root 767 Jul  3 23:33 apiserver          #需要配置

active

[root@k8s-master ~]# kubectl get node

CentOS Linux release 7.3.1611 (Core)

kubernetes

[root@k8s-master ~]# cd /etc/etcd/

SLAVE 两个节点相同

2.5 集群检查

23:KUBE_ADMISSION_CONTROL="--admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,ResourceQuota"

#配置config

[root@k8s-node1 kubernetes]# ll

1.3 环境准备

二、K8S集群部署

systemctl start iptables

192.168.56.140   Ready     56m

Author

发表评论

电子邮件地址不会被公开。 必填项已用*标注