Overview
Install Multus on Kubernetes. Multus is a meta CNI plugin that lets a Pod attach additional network interfaces beyond the cluster default.
References
https://github.com/k8snetworkplumbingwg/multus-cni
https://rheb.hatenablog.com/entry/multus_introduction
Prerequisites
OS
Ubuntu 20.04 is used.
Installing Multus
Download Multus with git clone, then apply multus-daemonset-thick-plugin.yml.
myadmin@ubuntu:~$ git clone https://github.com/k8snetworkplumbingwg/multus-cni.git
myadmin@ubuntu:~$ cd multus-cni
myadmin@ubuntu:~$ cat ./deployments/multus-daemonset-thick-plugin.yml | kubectl apply -f -
Confirm that the Multus Pods are Running.
myadmin@ubuntu:~$ kubectl get pods --all-namespaces | grep -i multus
kube-system kube-multus-ds-7vw49 1/1 Running 0 62s
kube-system kube-multus-ds-d6hvd 1/1 Running 0 62s
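As an optional sanity check, you can also confirm that the daemonset generated a Multus CNI configuration on each node. This is a sketch assuming the default thick-plugin paths; the generated file name can differ between releases.
# Run on each node; 00-multus.conf is the default name of the generated config
myadmin@ubuntu:~$ ls /etc/cni/net.d/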
Configuration #1
Create NetworkAttachmentDefinition resources for macvlan and ipvlan, then attach them to a test Pod.
myadmin@ubuntu:~$ cat <<EOF | kubectl create -f -
apiVersion: "k8s.cni.cncf.io/v1"
kind: NetworkAttachmentDefinition
metadata:
  name: macvlan-test01
spec:
  config: '{
      "cniVersion": "0.3.0",
      "type": "macvlan",
      "master": "ens37",
      "mode": "bridge",
      "ipam": {
        "type": "host-local",
        "subnet": "192.168.67.0/24",
        "rangeStart": "192.168.67.200",
        "rangeEnd": "192.168.67.209",
        "routes": [
          { "dst": "0.0.0.0/0" }
        ],
        "gateway": "192.168.67.1"
      }
    }'
EOF
myadmin@ubuntu:~$ cat <<EOF | kubectl create -f -
apiVersion: "k8s.cni.cncf.io/v1"
kind: NetworkAttachmentDefinition
metadata:
  name: ipvlan-test01
spec:
  config: '{
      "cniVersion": "0.3.0",
      "type": "ipvlan",
      "master": "ens37",
      "mode": "l2",
      "ipam": {
        "type": "host-local",
        "subnet": "192.168.67.0/24",
        "rangeStart": "192.168.67.200",
        "rangeEnd": "192.168.67.209",
        "routes": [
          { "dst": "0.0.0.0/0" }
        ],
        "gateway": "192.168.67.1"
      }
    }'
EOF
myadmin@ubuntu:~$ kubectl get network-attachment-definitions.k8s.cni.cncf.io
NAME             AGE
macvlan-test01   25s
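To double-check the CNI config embedded in a definition, it can be printed with jsonpath (net-attach-def is the short name registered by the Multus CRD):
myadmin@ubuntu:~$ kubectl get net-attach-def macvlan-test01 -o jsonpath='{.spec.config}'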
myadmin@ubuntu:~$ cat <<EOF | kubectl create -f -
apiVersion: v1
kind: Pod
metadata:
  name: macvlan-test01
  annotations:
    k8s.v1.cni.cncf.io/networks: '[{
      "name": "macvlan-test01",
      "default-route": ["192.168.67.100"]
    }]'
spec:
  containers:
  - name: macvlan-test01
    command: ["sleep", "infinity"]
    image: ubuntu
EOF
Alternatively, the same Pod can be created from an alpine image (delete the first Pod beforehand, since the name is identical):
myadmin@ubuntu:~$ cat <<EOF | kubectl create -f -
apiVersion: v1
kind: Pod
metadata:
  name: macvlan-test01
  annotations:
    k8s.v1.cni.cncf.io/networks: macvlan-test01
spec:
  containers:
  - name: macvlan-test01
    command: ["/bin/ash", "-c", "trap : TERM INT; sleep infinity & wait"]
    image: alpine
EOF
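Multus records the result of each attachment in the k8s.v1.cni.cncf.io/network-status annotation on the Pod. A simple way to inspect it is to dump the Pod's annotations (shown as a sketch; the exact JSON content depends on your CNI versions):
myadmin@ubuntu:~$ kubectl get pod macvlan-test01 -o jsonpath='{.metadata.annotations}'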
myadmin@ubuntu:~$ kubectl exec -it macvlan-test01 -- ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
3: eth0@if21: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UP
    link/ether c6:71:4b:13:d3:01 brd ff:ff:ff:ff:ff:ff
    inet 10.255.179.41/32 scope global eth0
       valid_lft forever preferred_lft forever
    inet6 fe80::c471:4bff:fe13:d301/64 scope link
       valid_lft forever preferred_lft forever
4: net1@eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP
    link/ether ca:90:d4:58:69:58 brd ff:ff:ff:ff:ff:ff
    inet 192.168.69.200/24 brd 192.168.69.255 scope global net1
       valid_lft forever preferred_lft forever
    inet6 fe80::c890:d4ff:fe58:6958/64 scope link
       valid_lft forever preferred_lft forever
Verify connectivity through the added NIC.
myadmin@ubuntu:~$ kubectl exec -it macvlan-test01 -- ping -c 4 192.168.69.1
PING 192.168.69.1 (192.168.69.1): 56 data bytes
64 bytes from 192.168.69.1: seq=0 ttl=128 time=0.411 ms
64 bytes from 192.168.69.1: seq=1 ttl=128 time=0.455 ms
64 bytes from 192.168.69.1: seq=2 ttl=128 time=0.412 ms
64 bytes from 192.168.69.1: seq=3 ttl=128 time=0.526 ms
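When the default-route annotation is used, the Pod's routing table should point the default route at the extra interface. This check assumes the image provides the ip command (alpine/busybox does; the stock ubuntu image does not):
myadmin@ubuntu:~$ kubectl exec -it macvlan-test01 -- ip route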
Initializing the control plane node
myadmin@ubuntu:~$ sudo kubeadm init --pod-network-cidr=192.168.80.0/24
[init] Using Kubernetes version: v1.23.5
[preflight] Running pre-flight checks
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [k8s-master kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 192.168.66.160]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [k8s-master localhost] and IPs [192.168.66.160 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [k8s-master localhost] and IPs [192.168.66.160 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 17.003461 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.23" in namespace kube-system with the configuration for the kubelets in the cluster
NOTE: The "kubelet-config-1.23" naming of the kubelet ConfigMap is deprecated. Once the UnversionedKubeletConfigMap feature gate graduates to Beta the default name will become just "kubelet-config". Kubeadm upgrade will handle this transition transparently.
[upload-certs] Skipping phase. Please see --upload-certs
[mark-control-plane] Marking the node k8s-master as control-plane by adding the labels: [node-role.kubernetes.io/master(deprecated) node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
[mark-control-plane] Marking the node k8s-master as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: xy5ie7.sz5w21mit274efnk
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to get nodes
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Alternatively, if you are the root user, you can run:
export KUBECONFIG=/etc/kubernetes/admin.conf
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 192.168.66.160:6443 --token xy5ie7.sz5w21mit274efnk \
        --discovery-token-ca-cert-hash sha256:fa8450ba8a085909b18030c4f6e50e0a7588fd6d34bc3c4f9e640e6f695ff062
myadmin@ubuntu:~$ mkdir -p $HOME/.kube
myadmin@ubuntu:~$ sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
myadmin@ubuntu:~$ sudo chown $(id -u):$(id -g) $HOME/.kube/config
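kubectl should now reach the API server. Note that nodes report NotReady until a Pod network add-on is installed:
myadmin@ubuntu:~$ kubectl get nodes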
Installing the Pod network add-on
myadmin@ubuntu:~$ kubectl create -f https://projectcalico.docs.tigera.io/manifests/tigera-operator.yaml
myadmin@ubuntu:~$ curl -OL https://docs.projectcalico.org/manifests/custom-resources.yaml
myadmin@ubuntu:~$ vim custom-resources.yaml
apiVersion: operator.tigera.io/v1
kind: Installation
metadata:
  name: default
spec:
  # Configures Calico networking.
  calicoNetwork:
    # Note: The ipPools section cannot be modified post-install.
    ipPools:
    - blockSize: 26
      cidr: 192.168.80.0/24
      encapsulation: VXLANCrossSubnet
      natOutgoing: Enabled
      nodeSelector: all()
myadmin@ubuntu:~$ kubectl create -f custom-resources.yaml
installation.operator.tigera.io/default created
apiserver.operator.tigera.io/default created
myadmin@master:~$ kubectl get all -A
NAMESPACE          NAME                                           READY   STATUS    RESTARTS   AGE
calico-apiserver   pod/calico-apiserver-647555d997-82648          1/1     Running   0          18m
calico-apiserver   pod/calico-apiserver-647555d997-ksh8f          1/1     Running   0          18m
calico-system      pod/calico-kube-controllers-67f85d7449-c4rlk   1/1     Running   0          20m
calico-system      pod/calico-node-427rk                          1/1     Running   0          9m50s
calico-system      pod/calico-node-rrhtt                          1/1     Running   0          20m
calico-system      pod/calico-typha-6cfb65854b-ht4n9              1/1     Running   0          20m
kube-system        pod/coredns-64897985d-84bww                    1/1     Running   0          30m
kube-system        pod/coredns-64897985d-xkts9                    1/1     Running   0          30m
kube-system        pod/etcd-k8s-master                            1/1     Running   0          30m
kube-system        pod/kube-apiserver-k8s-master                  1/1     Running   0          30m
kube-system        pod/kube-controller-manager-k8s-master         1/1     Running   0          30m
kube-system        pod/kube-proxy-sbt2c                           1/1     Running   0          9m50s
kube-system        pod/kube-proxy-x2clh                           1/1     Running   0          30m
kube-system        pod/kube-scheduler-k8s-master                  1/1     Running   0          30m
tigera-operator    pod/tigera-operator-b876f5799-kqvmv            1/1     Running   0          24m

NAMESPACE          NAME                                      TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)                  AGE
calico-apiserver   service/calico-api                        ClusterIP   10.96.95.54      <none>        443/TCP                  18m
calico-system      service/calico-kube-controllers-metrics   ClusterIP   10.111.158.187   <none>        9094/TCP                 18m
calico-system      service/calico-typha                      ClusterIP   10.106.75.202    <none>        5473/TCP                 20m
default            service/kubernetes                        ClusterIP   10.96.0.1        <none>        443/TCP                  30m
kube-system        service/kube-dns                          ClusterIP   10.96.0.10       <none>        53/UDP,53/TCP,9153/TCP   30m

NAMESPACE       NAME                         DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR            AGE
calico-system   daemonset.apps/calico-node   2         2         2       2            2           kubernetes.io/os=linux   20m
kube-system     daemonset.apps/kube-proxy    2         2         2       2            2           kubernetes.io/os=linux   30m

NAMESPACE          NAME                                      READY   UP-TO-DATE   AVAILABLE   AGE
calico-apiserver   deployment.apps/calico-apiserver          2/2     2            2           18m
calico-system      deployment.apps/calico-kube-controllers   1/1     1            1           20m
calico-system      deployment.apps/calico-typha              1/1     1            1           20m
kube-system        deployment.apps/coredns                   2/2     2            2           30m
tigera-operator    deployment.apps/tigera-operator           1/1     1            1           24m

NAMESPACE          NAME                                                 DESIRED   CURRENT   READY   AGE
calico-apiserver   replicaset.apps/calico-apiserver-647555d997          2         2         2       18m
calico-system      replicaset.apps/calico-kube-controllers-67f85d7449   1         1         1       20m
calico-system      replicaset.apps/calico-typha-6cfb65854b              1         1         1       20m
kube-system        replicaset.apps/coredns-64897985d                    2         2         2       30m
tigera-operator    replicaset.apps/tigera-operator-b876f5799            1         1         1       24m
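With the operator-based install above, the tigerastatus resource is a convenient way to wait until Calico reports Available:
myadmin@master:~$ kubectl get tigerastatus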
Adding a worker node
myadmin@worker:~$ sudo kubeadm join 192.168.66.160:6443 --token xy5ie7.sz5w21mit274efnk --discovery-token-ca-cert-hash sha256:fa8450ba8a085909b18030c4f6e50e0a7588fd6d34bc3c4f9e640e6f695ff062
myadmin@master:~$ kubectl get node
NAME         STATUS   ROLES                  AGE     VERSION
k8s-master   Ready    control-plane,master   28m     v1.23.5
k8s-worker   Ready    <none>                 7m59s   v1.23.5
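If the bootstrap token from kubeadm init has expired by the time a node joins, a fresh join command can be generated on the control plane:
myadmin@master:~$ sudo kubeadm token create --print-join-command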