Setting up a Kubernetes Cluster
Base environment
OS release:
# cat /etc/redhat-release
CentOS Linux release 7.3.1611 (Core)
Hostname assignments
centos-master 192.168.59.135
centos-minion1 192.168.59.132
centos-minion2 192.168.59.133
Disable SELinux and firewalld on all nodes, then reboot the servers
# systemctl stop firewalld
# systemctl disable firewalld
# setenforce 0
# sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
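As a quick sanity check (an optional step, not part of the original instructions), SELinux should now report Permissive (Disabled after the reboot) and firewalld should no longer be running:
# getenforce
# systemctl is-active firewalld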
Install and deploy the etcd cluster on all three nodes
# yum install etcd -y
Installed version:
# rpm -qa | grep etcd
etcd-3.2.7-1.el7.x86_64
Configure etcd (/etc/etcd/etcd.conf)
Master etcd configuration
# cat /etc/etcd/etcd.conf | grep -Ev "^#|^$"
ETCD_NAME=centos-master
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="http://0.0.0.0:2380"
ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379"
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.59.135:2380"
ETCD_INITIAL_CLUSTER="centos-master=http://192.168.59.135:2380,centos-minion2=http://192.168.59.133:2380,centos-minion1=http://192.168.59.132:2380"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_ADVERTISE_CLIENT_URLS="http://192.168.59.135:2379"
Minion2 etcd configuration
# grep -Ev "^#|^$" /etc/etcd/etcd.conf
ETCD_NAME=centos-minion2
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="http://0.0.0.0:2380"
ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379"
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.59.133:2380"
ETCD_INITIAL_CLUSTER="centos-master=http://192.168.59.135:2380,centos-minion2=http://192.168.59.133:2380,centos-minion1=http://192.168.59.132:2380"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_ADVERTISE_CLIENT_URLS="http://192.168.59.133:2379"
Minion1 etcd configuration
# grep -Ev "^#|^$" /etc/etcd/etcd.conf
ETCD_NAME=centos-minion1
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="http://0.0.0.0:2380"
ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379"
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.59.132:2380"
ETCD_INITIAL_CLUSTER="centos-master=http://192.168.59.135:2380,centos-minion2=http://192.168.59.133:2380,centos-minion1=http://192.168.59.132:2380"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_ADVERTISE_CLIENT_URLS="http://192.168.59.132:2379"
Start the etcd cluster (on all three nodes) and check its status (from any one node)
# Start etcd
# systemctl start etcd
# systemctl enable etcd
# Check status
# etcdctl member list
10a23ff41e3abcb8: name=centos-minion1 peerURLs=http://192.168.59.132:2380 clientURLs=http://192.168.59.132:2379 isLeader=false
168ea6ce7632b2e4: name=centos-minion2 peerURLs=http://192.168.59.133:2380 clientURLs=http://192.168.59.133:2379 isLeader=true
587d83f824bf96c6: name=centos-master peerURLs=http://192.168.59.135:2380 clientURLs=http://192.168.59.135:2379 isLeader=false
# etcdctl cluster-health
member 10a23ff41e3abcb8 is healthy: got healthy result from http://192.168.59.132:2379
member 168ea6ce7632b2e4 is healthy: got healthy result from http://192.168.59.133:2379
member 587d83f824bf96c6 is healthy: got healthy result from http://192.168.59.135:2379
cluster is healthy
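Optionally, a simple write/read/delete against the etcd v2 API confirms that the cluster actually stores data; the key name /test/message here is arbitrary:
# etcdctl set /test/message "hello"
hello
# etcdctl get /test/message
hello
# etcdctl rm /test/message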
Install and configure the kubernetes master node
# yum install kubernetes -y
Installed versions:
# rpm -qa | grep kubernetes
kubernetes-client-1.5.2-0.7.git269f928.el7.x86_64
kubernetes-1.5.2-0.7.git269f928.el7.x86_64
kubernetes-master-1.5.2-0.7.git269f928.el7.x86_64
kubernetes-node-1.5.2-0.7.git269f928.el7.x86_64
Configure the kubernetes API server (/etc/kubernetes/apiserver)
# cat /etc/kubernetes/apiserver | grep -Ev "^#|^$"
KUBE_API_ADDRESS="--insecure-bind-address=0.0.0.0"
KUBE_API_PORT="--port=8080"
KUBELET_PORT="--kubelet-port=10250"
KUBE_ETCD_SERVERS="--etcd-servers=http://127.0.0.1:2379"
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16"
KUBE_ADMISSION_CONTROL="--admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ResourceQuota"
KUBE_API_ARGS=""
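Note that --etcd-servers points only at the local etcd member, which works here because etcd also runs on the master. As an optional variation (not part of the original setup), all three members can be listed so the API server does not depend on a single etcd node:
KUBE_ETCD_SERVERS="--etcd-servers=http://192.168.59.135:2379,http://192.168.59.132:2379,http://192.168.59.133:2379"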
Configure the kubernetes common settings (/etc/kubernetes/config)
# cat /etc/kubernetes/config | grep -Ev "^#|^$"
KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=0"
KUBE_ALLOW_PRIV="--allow-privileged=false"
KUBE_MASTER="--master=http://centos-master:8080"
Install the kubernetes minion nodes (minion1 and minion2)
# yum install flannel docker kubernetes -y
Configure flannel (/etc/sysconfig/flanneld)
# grep -Ev "^#|^$" /etc/sysconfig/flanneld
FLANNEL_ETCD_ENDPOINTS="http://192.168.59.133:2379"
FLANNEL_ETCD_PREFIX="/atomic.io/network"
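This points flannel at a single etcd member (minion2). As an optional variation, all three members can be listed so flannel keeps working if that node goes down:
FLANNEL_ETCD_ENDPOINTS="http://192.168.59.135:2379,http://192.168.59.132:2379,http://192.168.59.133:2379"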
Configure the kubelet (/etc/kubernetes/kubelet). The example below is taken from centos-minion2; on centos-minion1 set --hostname-override=centos-minion1 accordingly.
# grep -Ev "^#|^$" /etc/kubernetes/kubelet
KUBELET_ADDRESS="--address=0.0.0.0"
KUBELET_PORT="--port=10250"
KUBELET_HOSTNAME="--hostname-override=centos-minion2"
KUBELET_API_SERVER="--api-servers=http://centos-master:8080"
# Fill in your own registry address below; if your nodes have direct Internet access you can simply keep the default image and skip this
# KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=registry.access.redhat.com/rhel7/pod-infrastructure:latest"
KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=192.168.59.133:5000/pod-infrastructure:latest"
# Fill in your DNS and network settings below
KUBELET_ARGS="--cluster-dns=192.168.51.198 --cluster-domain=atomic.io/network"
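Note that --cluster-domain normally takes a plain DNS suffix; the value above reuses the flannel etcd prefix. If you later add a cluster DNS service such as kube-dns, a more conventional setting (an assumption, keep your own --cluster-dns address) would look like:
KUBELET_ARGS="--cluster-dns=192.168.51.198 --cluster-domain=cluster.local"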
Start the services
kubernetes master
for SERVICES in kube-apiserver kube-controller-manager kube-scheduler; do
systemctl restart $SERVICES
systemctl enable $SERVICES
systemctl status $SERVICES -l
done
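Once the three services are running, kubectl on the master can confirm that the scheduler, controller-manager, and etcd are all reachable; each component should report Healthy:
# kubectl get componentstatuses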
etcd network configuration (flannel address range)
# etcdctl mk /atomic.io/network/config '{"Network":"172.17.0.0/16"}'
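Reading the key back confirms that flannel will find the network definition:
# etcdctl get /atomic.io/network/config
{"Network":"172.17.0.0/16"}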
kubernetes minion
for SERVICES in kube-proxy kubelet docker flanneld; do
systemctl restart $SERVICES
systemctl enable $SERVICES
systemctl status $SERVICES
done
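On each minion you can check that flanneld leased a subnet and that docker picked it up; flanneld writes its lease to /run/flannel/subnet.env (the default subnet file of the CentOS flannel package), and docker0 should sit inside that range:
# cat /run/flannel/subnet.env
# ip addr show docker0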
Check the node status (on the master)
# kubectl get nodes
NAME STATUS AGE
centos-minion1 Ready 1h
centos-minion2 Ready 1h
Check the flannel interface
[root@centos-minion1 ~]# ifconfig flannel0
flannel0: flags=4305 mtu 1472
inet 172.17.34.0 netmask 255.255.0.0 destination 172.17.34.0
unspec 00-00-00-00-00-00-00-00-00-00-00-00-00-00-00-00 txqueuelen 500 (UNSPEC)
RX packets 0 bytes 0 (0.0 B)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 0 bytes 0 (0.0 B)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
[root@centos-minion2 ~]# ifconfig flannel0
flannel0: flags=4305 mtu 1472
inet 172.17.59.0 netmask 255.255.0.0 destination 172.17.59.0
inet6 fe80::2d54:2169:1a0:d364 prefixlen 64 scopeid 0x20
unspec 00-00-00-00-00-00-00-00-00-00-00-00-00-00-00-00 txqueuelen 500 (UNSPEC)
RX packets 0 bytes 0 (0.0 B)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 3 bytes 144 (144.0 B)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
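A quick connectivity check for the overlay (optional, using the addresses shown above) is to ping one minion's flannel address from the other:
[root@centos-minion1 ~]# ping -c 3 172.17.59.0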
Quick test (create a Pod, a Service, and an RC on the master)
# ls
http-pod.yaml http-rc.yaml http-service.yaml
# cat http-pod.yaml
apiVersion: v1
kind: Pod
metadata:
name: http-pod
labels:
name: http-pod
spec:
containers:
- name: http
image: 192.168.59.133:5000/centos6-http
ports:
- containerPort: 80
# cat http-service.yaml
apiVersion: v1
kind: Service
metadata:
name: http-service
spec:
type: NodePort
ports:
- port: 80
nodePort: 30001
selector:
name: http-pod
# cat http-rc.yaml
apiVersion: v1
kind: ReplicationController
metadata:
name: http-rc
spec:
replicas: 2
selector:
name: http-pod
template:
metadata:
labels:
name: http-pod
spec:
containers:
- name: http-pod
image: 192.168.59.133:5000/centos6-http
ports:
- containerPort: 80
Create the Pod
# kubectl create -f http-pod.yaml
pod "http-pod" created
# kubectl get pods
NAME READY STATUS RESTARTS AGE
http-pod 1/1 Running 0 4s
Create the Service
# kubectl create -f http-service.yaml
service "http-service" created
# kubectl get service
NAME           CLUSTER-IP      EXTERNAL-IP   PORT(S)        AGE
http-service   10.254.235.49   <nodes>       80:30001/TCP   5s
kubernetes     10.254.0.1      <none>        443/TCP        1d
Check which node the pod was scheduled on
# kubectl describe service
Name: http-service
Namespace: default
Labels:
Selector: name=http-pod
Type: NodePort
IP: 10.254.235.49
Port: 80/TCP
NodePort: 30001/TCP
Endpoints: 172.17.59.3:80 # this is the flannel address on minion2
Session Affinity: None
No events.
Name: kubernetes
Namespace: default
Labels: component=apiserver
provider=kubernetes
Selector:
Type: ClusterIP
IP: 10.254.0.1
Port: https 443/TCP
Endpoints: 192.168.59.135:6443
Session Affinity: ClientIP
No events.
Accessing the pod returns the default HTTP welcome page
# curl http://192.168.59.133:30001/
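Because kube-proxy opens the NodePort on every node, the same page should also be reachable through minion1, no matter where the pod actually runs:
# curl http://192.168.59.132:30001/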
Create the RC
# kubectl create -f http-rc.yaml
replicationcontroller "http-rc" created
# kubectl get rc
NAME DESIRED CURRENT READY AGE
http-rc 2 2 2 8s
# kubectl get pods # there was one pod before; now there are two
NAME READY STATUS RESTARTS AGE
http-pod 1/1 Running 0 9m
http-rc-b24kx 1/1 Running 0 13s
Now delete a pod and see whether a replacement pod gets created
# kubectl delete pod http-pod
pod "http-pod" deleted
# kubectl get pods
NAME READY STATUS RESTARTS AGE
http-rc-8cl5p 1/1 Running 0 2s
http-rc-b24kx 1/1 Running 0 2m
# kubectl delete pod http-rc-8cl5p http-rc-b24kx
pod "http-rc-8cl5p" deleted
pod "http-rc-b24kx" deleted
# kubectl get pods
NAME READY STATUS RESTARTS AGE
http-rc-xxtrw 1/1 Running 0 3s
http-rc-z8t9n 1/1 Running 0 3s
Pods were deleted twice; each time the RC brought the count back to two, and both pods remained accessible.
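Besides self-healing, the RC also makes scaling straightforward; as an optional extra test (not part of the original steps), raising the replica count to three should produce one additional pod:
# kubectl scale rc http-rc --replicas=3
# kubectl get pods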
Describe the pods
# kubectl describe pod
Name: http-rc-xxtrw
Namespace: default
Node: centos-minion2/192.168.59.133
Start Time: Tue, 31 Oct 2017 16:05:51 +0800
Labels: name=http-pod
Status: Running
IP: 172.17.59.4
Controllers: ReplicationController/http-rc
Containers:
http-pod:
Container ID: docker://a3338c455a27540c8f7b7b3f01fa3862b1082f7ae47e9b3761610b4a6043245b
Image: 192.168.59.133:5000/centos6-http
Image ID: docker-pullable://192.168.59.133:5000/centos6-http@sha256:545cbb5dda7db142f958ec4550a4dcb6daed47863c78dc38206c39bfa0b5e715
Port: 80/TCP
State: Running
Started: Tue, 31 Oct 2017 16:05:53 +0800
Ready: True
Restart Count: 0
Volume Mounts:
Environment Variables:
Conditions:
Type Status
Initialized True
Ready True
PodScheduled True
No volumes.
QoS Class: BestEffort
Tolerations:
Events:
FirstSeen LastSeen Count From SubObjectPath Type Reason Message
--------- -------- ----- ---- ------------- -------- ------ -------
2m 2m 1 {default-scheduler } Normal Scheduled Successfully assigned http-rc-xxtrw to centos-minion2
2m 2m 1 {kubelet centos-minion2} spec.containers{http-pod} Normal Pulling pulling image "192.168.59.133:5000/centos6-http"
2m 2m 1 {kubelet centos-minion2} spec.containers{http-pod} Normal Pulled Successfully pulled image "192.168.59.133:5000/centos6-http"
2m 2m 1 {kubelet centos-minion2} spec.containers{http-pod} Normal Created Created container with docker id a3338c455a27; Security:[seccomp=unconfined]
2m 2m 1 {kubelet centos-minion2} spec.containers{http-pod} Normal Started Started container with docker id a3338c455a27
Name: http-rc-z8t9n
Namespace: default
Node: centos-minion1/192.168.59.132
Start Time: Tue, 31 Oct 2017 16:05:52 +0800
Labels: name=http-pod
Status: Running
IP: 172.17.34.3
Controllers: ReplicationController/http-rc
Containers:
http-pod:
Container ID: docker://6b4fbca3f6a8690f24fe749556323a6be85f5122f378a076a8bf9d0556a89b6e
Image: 192.168.59.133:5000/centos6-http
Image ID: docker-pullable://192.168.59.133:5000/centos6-http@sha256:545cbb5dda7db142f958ec4550a4dcb6daed47863c78dc38206c39bfa0b5e715
Port: 80/TCP
State: Running
Started: Tue, 31 Oct 2017 16:05:54 +0800
Ready: True
Restart Count: 0
Volume Mounts:
Environment Variables:
Conditions:
Type Status
Initialized True
Ready True
PodScheduled True
No volumes.
QoS Class: BestEffort
Tolerations:
Events:
FirstSeen LastSeen Count From SubObjectPath Type Reason Message
--------- -------- ----- ---- ------------- -------- ------ -------
2m 2m 1 {default-scheduler } Normal Scheduled Successfully assigned http-rc-z8t9n to centos-minion1
2m 2m 1 {kubelet centos-minion1} spec.containers{http-pod} Normal Pulling pulling image "192.168.59.133:5000/centos6-http"
2m 2m 1 {kubelet centos-minion1} spec.containers{http-pod} Normal Pulled Successfully pulled image "192.168.59.133:5000/centos6-http"
2m 2m 1 {kubelet centos-minion1} spec.containers{http-pod} Normal Created Created container with docker id 6b4fbca3f6a8; Security:[seccomp=unconfined]
2m 2m 1 {kubelet centos-minion1} spec.containers{http-pod} Normal Started Started container with docker id 6b4fbca3f6a8