Running Container Clusters with Kubernetes

[root@ansible ~]# vim /etc/hosts
192.168.183.128 ansible.example.com
192.168.183.131 node1.example.com
192.168.183.132 node2.example.com

[root@ansible ~]# ip a
inet 192.168.183.128/24 brd 192.168.183.255 scope global dynamic eno16777736
valid_lft 1732sec preferred_lft 1732sec
inet6 fe80::20c:29ff:fe5b:7384/64 scope link
valid_lft forever preferred_lft forever

[root@ansible ~]# setenforce 0;systemctl disable iptables-services firewalld;systemctl stop iptables-services firewalld

[root@ansible ~]# yum install ntp -y

[root@ansible ~]# systemctl start ntpd ;systemctl enable ntpd;systemctl status ntpd

[root@ansible ~]# vim /etc/yum.repos.d/virt7-docker-common-release.repo
[virt7-docker-common-release]
name=virt7-docker-common-release
baseurl=http://cbs.centos.org/repos/virt7-docker-common-release/x86_64/os/
gpgcheck=0
[root@ansible ~]# yum -y install --enablerepo=virt7-docker-common-release kubernetes etcd flannel

 

[root@ansible ~]# vim /etc/kubernetes/config
KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=0"
KUBE_ALLOW_PRIV="--allow-privileged=false"
KUBE_MASTER="--master=http://ansible.example.com:8080"

 

[root@ansible ~]# vim /etc/etcd/etcd.conf
# [member]
ETCD_NAME=default
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379"

#[cluster]
ETCD_ADVERTISE_CLIENT_URLS="http://0.0.0.0:2379"

 

[root@ansible ~]# vim /etc/kubernetes/apiserver

# The address on the local server to listen to.
KUBE_API_ADDRESS="--address=0.0.0.0"

# The port on the local server to listen on.
KUBE_API_PORT="--port=8080"

# Port minions listen on
KUBELET_PORT="--kubelet-port=10250"

# Comma separated list of nodes in the etcd cluster
KUBE_ETCD_SERVERS="--etcd-servers=http://ansible.example.com:2379"

# Address range to use for services
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16"

# Add your own!
KUBE_API_ARGS=""

[root@ansible ~]# systemctl start etcd

[root@ansible ~]# etcdctl mkdir /kube-centos/network

[root@ansible ~]# etcdctl mk /kube-centos/network/config "{ \"Network\": \"172.30.0.0/16\", \"SubnetLen\": 24, \"Backend\": { \"Type\": \"vxlan\" } }"
{ "Network": "172.30.0.0/16", "SubnetLen": 24, "Backend": { "Type": "vxlan" } }

 

[root@ansible ~]# vim /etc/sysconfig/flanneld
FLANNEL_ETCD_ENDPOINTS="http://ansible.example.com:2379"
FLANNEL_ETCD_PREFIX="/kube-centos/network"

 

[root@ansible ~]# for SERVICES in etcd kube-apiserver kube-controller-manager kube-scheduler flanneld; do
systemctl restart $SERVICES
systemctl enable $SERVICES
systemctl status $SERVICES
done

 

[root@ansible ~]# netstat -tlpn
Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address Foreign Address State PID/Program name
tcp 0 0 127.0.0.1:2380 0.0.0.0:* LISTEN 2245/etcd
tcp 0 0 0.0.0.0:22 0.0.0.0:* LISTEN 1096/sshd
tcp 0 0 127.0.0.1:25 0.0.0.0:* LISTEN 2124/master
tcp6 0 0 :::10251 :::* LISTEN 2328/kube-scheduler
tcp6 0 0 :::6443 :::* LISTEN 2277/kube-apiserver
tcp6 0 0 :::2379 :::* LISTEN 2245/etcd
tcp6 0 0 :::10252 :::* LISTEN 2304/kube-controlle
tcp6 0 0 :::8080 :::* LISTEN 2277/kube-apiserver
tcp6 0 0 :::22 :::* LISTEN 1096/sshd
tcp6 0 0 ::1:25 :::* LISTEN 2124/master

[root@ansible ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eno16777736: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
link/ether 00:0c:29:5b:73:84 brd ff:ff:ff:ff:ff:ff
inet 192.168.183.128/24 brd 192.168.183.255 scope global dynamic eno16777736
valid_lft 1320sec preferred_lft 1320sec
inet6 fe80::20c:29ff:fe5b:7384/64 scope link
valid_lft forever preferred_lft forever
3: flannel.1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UNKNOWN
link/ether 36:6b:58:8c:f2:04 brd ff:ff:ff:ff:ff:ff
inet 172.30.50.0/32 scope global flannel.1
valid_lft forever preferred_lft forever
inet6 fe80::346b:58ff:fe8c:f204/64 scope link
valid_lft forever preferred_lft forever
[root@ansible ~]# kubectl get nodes
NAME STATUS AGE
192.168.183.131 Ready 52s

[root@ansible ~]# kubectl get nodes
NAME STATUS AGE
192.168.183.131 NotReady 2m

###########                   Node 1          #####################

[root@node1 ~]# setenforce 0;systemctl disable iptables-services firewalld;systemctl stop iptables-services firewalld

[root@node1 ~]# yum install ntp -y

[root@node1 ~]# systemctl start ntpd ;systemctl enable ntpd;systemctl status ntpd
Created symlink from /etc/systemd/system/multi-user.target.wants/ntpd.service to /usr/lib/systemd/system/ntpd.service.
● ntpd.service – Network Time Service
Loaded: loaded (/usr/lib/systemd/system/ntpd.service; enabled; vendor preset: disabled)
Active: active (running) since Thu 2017-05-11 20:56:10 IST; 121ms ago
Main PID: 2844 (ntpd)
CGroup: /system.slice/ntpd.service
└─2844 /usr/sbin/ntpd -u ntp:ntp -g

May 11 20:56:10 node1.example.com ntpd[2844]: 0.0.0.0 c01d 0d kern kernel time sync enabled
May 11 20:56:10 node1.example.com ntpd[2844]: ntp_io: estimated max descriptors: 1024, initial socket boundary: 16
May 11 20:56:10 node1.example.com ntpd[2844]: Listen and drop on 0 v4wildcard 0.0.0.0 UDP 123
May 11 20:56:10 node1.example.com ntpd[2844]: Listen and drop on 1 v6wildcard :: UDP 123
May 11 20:56:10 node1.example.com ntpd[2844]: Listen normally on 2 lo 127.0.0.1 UDP 123
May 11 20:56:10 node1.example.com ntpd[2844]: Listen normally on 3 eno16777736 192.168.183.131 UDP 123
May 11 20:56:10 node1.example.com ntpd[2844]: Listen normally on 4 lo ::1 UDP 123
May 11 20:56:10 node1.example.com ntpd[2844]: Listen normally on 5 eno16777736 fe80::20c:29ff:fe18:d6ae UDP 123
May 11 20:56:10 node1.example.com ntpd[2844]: Listening on routing socket on fd #22 for interface updates
May 11 20:56:10 node1.example.com systemd[1]: Started Network Time Service.

[root@node1 ~]# vim /etc/yum.repos.d/virt7-docker-common-release.repo
[virt7-docker-common-release]
name=virt7-docker-common-release
baseurl=http://cbs.centos.org/repos/virt7-docker-common-release/x86_64/os/
gpgcheck=0

[root@node1 ~]# yum -y install --enablerepo=virt7-docker-common-release kubernetes etcd flannel docker

[root@node1 ~]# vim /etc/sysconfig/flanneld
FLANNEL_ETCD_ENDPOINTS="http://ansible.example.com:2379"
FLANNEL_ETCD_PREFIX="/kube-centos/network"
[root@node1 ~]# vim /etc/kubernetes/kubelet
KUBELET_ADDRESS="--address=0.0.0.0"
KUBELET_PORT="--port=10250"
KUBELET_HOSTNAME="--hostname-override=192.168.183.131"
KUBELET_API_SERVER="--api-servers=http://192.168.183.128:8080"
KUBELET_ARGS=""

 

[root@node1 ~]# for SERVICES in kube-proxy kubelet docker flanneld; do
> systemctl start $SERVICES
> systemctl enable $SERVICES
> systemctl status $SERVICES
> done

[root@node1 ~]# systemctl status flanneld
● flanneld.service – Flanneld overlay address etcd agent
Loaded: loaded (/usr/lib/systemd/system/flanneld.service; enabled; vendor preset: disabled)
Active: activating (start) since Thu 2017-05-11 16:32:27 IST; 29s ago
Main PID: 2754 (flanneld)
Memory: 4.6M
CGroup: /system.slice/flanneld.service
└─2754 /usr/bin/flanneld -etcd-endpoints=http://ansible.example.com:2379 -etcd-prefix=/kube-centos/network

May 11 16:32:39 node1.example.com flanneld-start[2754]: E0511 16:32:39.075376 2754 network.go:102] failed to retrieve network config…igured
May 11 16:32:41 node1.example.com flanneld-start[2754]: E0511 16:32:41.076562 2754 network.go:102] failed to retrieve network config…igured
May 11 16:32:43 node1.example.com flanneld-start[2754]: E0511 16:32:43.077756 2754 network.go:102] failed to retrieve network config…igured
May 11 16:32:45 node1.example.com flanneld-start[2754]: E0511 16:32:45.085064 2754 network.go:102] failed to retrieve network config…igured
May 11 16:32:47 node1.example.com flanneld-start[2754]: E0511 16:32:47.086350 2754 network.go:102] failed to retrieve network config…igured
May 11 16:32:49 node1.example.com flanneld-start[2754]: E0511 16:32:49.087575 2754 network.go:102] failed to retrieve network config…igured
May 11 16:32:51 node1.example.com flanneld-start[2754]: E0511 16:32:51.088665 2754 network.go:102] failed to retrieve network config…igured
May 11 16:32:53 node1.example.com flanneld-start[2754]: E0511 16:32:53.089557 2754 network.go:102] failed to retrieve network config…igured
May 11 16:32:55 node1.example.com flanneld-start[2754]: E0511 16:32:55.090505 2754 network.go:102] failed to retrieve network config…igured
May 11 16:32:57 node1.example.com flanneld-start[2754]: E0511 16:32:57.091709 2754 network.go:102] failed to retrieve network config…igured
Hint: Some lines were ellipsized, use -l to show in full.

[root@node1 ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eno16777736: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
link/ether 00:0c:29:18:d6:ae brd ff:ff:ff:ff:ff:ff
inet 192.168.183.131/24 brd 192.168.183.255 scope global dynamic eno16777736
valid_lft 1704sec preferred_lft 1704sec
inet6 fe80::20c:29ff:fe18:d6ae/64 scope link
valid_lft forever preferred_lft forever
3: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN
link/ether 02:42:41:bc:42:58 brd ff:ff:ff:ff:ff:ff
inet 172.17.0.1/16 scope global docker0
valid_lft forever preferred_lft forever

[root@node1 ~]# cat /etc/hosts
192.168.183.131 node1.example.com
192.168.183.128 ansible.example.com

 

[root@node1 ~]# kubectl config set-cluster default-cluster --server=http://ansible.example.com:8080
Cluster “default-cluster” set.

[root@node1 ~]# kubectl config set-context default-context --cluster=default-cluster --user=default-admin
Context “default-context” set.

[root@node1 ~]# kubectl config use-context default-context

[root@node1 ~]# systemctl status flanneld

[root@node1 ~]# ip a
3: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN
link/ether 02:42:41:bc:42:58 brd ff:ff:ff:ff:ff:ff
inet 172.30.74.1/24 scope global docker0
valid_lft forever preferred_lft forever
4: flannel.1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UNKNOWN
link/ether c6:6d:2b:8d:7c:3d brd ff:ff:ff:ff:ff:ff
inet 172.30.74.0/32 scope global flannel.1
valid_lft forever preferred_lft forever
inet6 fe80::c46d:2bff:fe8d:7c3d/64 scope link
valid_lft forever preferred_lft forever

[root@node1 ~]# vim /etc/kubernetes/kubelet
KUBELET_ARGS="--cluster-dns=10.254.254.254 --cluster-domain=cluster.local"

[root@node1 ~]# systemctl restart kubelet

#################                 Master   ################

[root@ansible ~]# kubectl get nodes
NAME STATUS AGE
192.168.183.131 Ready 6m

 

https://github.com/projectcalico/k8s-exec-plugin/blob/master/config/master/dns/skydns-svc.yaml

 

[root@ansible ~]# vim skydns-svc.yml
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "KubeDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: 10.254.254.254
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP

 

[root@ansible ~]# kubectl create -f skydns-svc.yml
service “kube-dns” created

[root@ansible ~]# kubectl get svc --all-namespaces
NAMESPACE NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE
default kubernetes 10.254.0.1 <none> 443/TCP 19m
kube-system kube-dns 10.254.254.254 <none> 53/UDP,53/TCP 19s

[root@ansible ~]# vim skydns-rc.yml
apiVersion: v1
kind: ReplicationController
metadata:
  name: kube-dns-v11
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    version: v11
    kubernetes.io/cluster-service: "true"
spec:
  replicas: 1
  selector:
    k8s-app: kube-dns
    version: v11
  template:
    metadata:
      labels:
        k8s-app: kube-dns
        version: v11
        kubernetes.io/cluster-service: "true"
    spec:
      containers:
      - name: etcd
        image: gcr.io/google_containers/etcd-amd64:2.2.1
        resources:
          limits:
            cpu: 100m
            memory: 500Mi
          requests:
            cpu: 100m
            memory: 50Mi
        command:
        - /usr/local/bin/etcd
        - -data-dir
        - /var/etcd/data
        - -listen-client-urls
        - http://127.0.0.1:2379,http://127.0.0.1:4001
        - -advertise-client-urls
        - http://127.0.0.1:2379,http://127.0.0.1:4001
        - -initial-cluster-token
        - skydns-etcd
        #volumeMounts:
        #- name: etcd-storage
        #  mountPath: /var/etcd/data
      - name: kube2sky
        image: gcr.io/google_containers/kube2sky:1.14
        command:
        - /kube2sky
        - --domain=cluster.local
        # NOTE: must point at THIS cluster's API server (ansible.example.com),
        # not the 192.168.0.20 address copied from elsewhere.
        - --kube-master-url=http://192.168.183.128:8080
        resources:
          limits:
            cpu: 100m
            memory: 200Mi
          requests:
            cpu: 100m
            memory: 50Mi
        livenessProbe:
          httpGet:
            path: /healthz
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /readiness
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 30
          timeoutSeconds: 5
        args:
        - --domain=cluster.local
      - name: skydns
        image: gcr.io/google_containers/skydns:2015-10-13-8c72f8c
        resources:
          limits:
            cpu: 100m
            memory: 200Mi
          requests:
            cpu: 100m
            memory: 50Mi
        args:
        - -machines=http://127.0.0.1:4001
        - -addr=0.0.0.0:53
        - -ns-rotate=false
        - -domain=cluster.local.
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
      - name: healthz
        image: gcr.io/google_containers/exechealthz:1.0
        resources:
          limits:
            cpu: 10m
            memory: 20Mi
          requests:
            cpu: 10m
            memory: 20Mi
        args:
        - -cmd=nslookup kubernetes.default.svc.cluster.local 127.0.0.1 >/dev/null
        - -port=8080
        ports:
        - containerPort: 8080
          protocol: TCP
      #volumes:
      #- name: etcd-storage
      #  emptyDir: {}
      dnsPolicy: Default

[root@ansible ~]# kubectl create -f skydns-rc.yml
replicationcontroller “kube-dns-v11” created

[root@ansible ~]# kubectl get pods --all-namespaces
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system kube-dns-v11-7xkr9 0/4 ContainerCreating 0 12s
[root@ansible ~]# kubectl get pods --all-namespaces
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system kube-dns-v11-7xkr9 3/4 Running 1 2m

[root@ansible ~]# vim busybox.yaml
apiVersion: v1
kind: Pod
metadata:
  name: busybox
  namespace: default
spec:
  containers:
  - image: busybox
    command:
    - sleep
    - "3600"
    imagePullPolicy: IfNotPresent
    name: busybox
  restartPolicy: Always

[root@ansible ~]# kubectl create -f busybox.yaml
pod “busybox” created
[root@ansible ~]# kubectl get pods
NAME READY STATUS RESTARTS AGE
busybox 0/1 ContainerCreating 0 22s

[root@ansible ~]# kubectl get pods --all-namespaces
NAMESPACE NAME READY STATUS RESTARTS AGE
default busybox 0/1 ContainerCreating 0 35s
kube-system kube-dns-v11-7xkr9 3/4 Running 2 3m

[root@ansible ~]# kubectl exec -ti busybox sh
/ #

[root@ansible ~]# kubectl get pods --all-namespaces
NAMESPACE NAME READY STATUS RESTARTS AGE
default busybox 1/1 Running 0 2m
kube-system kube-dns-v11-7xkr9 4/4 Running 3 5m

[root@ansible ~]# kubectl get nodes
NAME STATUS AGE
192.168.183.131 Ready 23m

Node1 

[root@node1 ~]# docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
67afba5ac250 busybox “sleep 3600” 25 seconds ago Up 24 seconds k8s_busybox.1816e1f0_busybox_default_c6a4421e-363b-11e7-b62d-000c295b7384_a153ddb6
88028684cafe gcr.io/google_containers/kube2sky:1.14 “/kube2sky –domain=c” 54 seconds ago Up 53 seconds k8s_kube2sky.da761fae_kube-dns-v11-7xkr9_kube-system_4dc1f194-363b-11e7-b62d-000c295b7384_92628dbb
7ad29fecbb2a gcr.io/google_containers/pause-amd64:3.0 “/pause” About a minute ago Up About a minute k8s_POD.d8dbe16c_busybox_default_c6a4421e-363b-11e7-b62d-000c295b7384_97872875
412078533d22 gcr.io/google_containers/etcd-amd64:2.2.1 “/usr/local/bin/etcd ” 3 minutes ago Up 3 minutes k8s_etcd.61c01bcc_kube-dns-v11-7xkr9_kube-system_4dc1f194-363b-11e7-b62d-000c295b7384_a5b68489
1731a1a561ba gcr.io/google_containers/exechealthz:1.0 “/exechealthz ‘-cmd=n” 3 minutes ago Up 3 minutes k8s_healthz.75a218e9_kube-dns-v11-7xkr9_kube-system_4dc1f194-363b-11e7-b62d-000c295b7384_c954718f
c38745c814ea gcr.io/google_containers/skydns:2015-10-13-8c72f8c “/skydns -machines=ht” 4 minutes ago Up 4 minutes k8s_skydns.c1373fa2_kube-dns-v11-7xkr9_kube-system_4dc1f194-363b-11e7-b62d-000c295b7384_68e34163
1e9856ae23fc gcr.io/google_containers/pause-amd64:3.0 “/pause” 4 minutes ago Up 4 minutes k8s_POD.72f34cbf_kube-dns-v11-7xkr9_kube-system_4dc1f194-363b-11e7-b62d-000c295b7384_e9048764

[root@node1 ~]# kubectl exec -ti busybox sh
/ #

Leave a comment