diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000..39a6238
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,3 @@
+*.sh text eol=lf
+*.conf text eol=lf
+vimrc text eol=lf
diff --git a/.gitignore b/.gitignore
index b98c120..6e58adb 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,53 +1,13 @@
-admin-csr.json
-admin-key.pem
-admin.csr
-admin.pem
-admin.kubeconfig
-ca-config.json
-ca-csr.json
-ca-key.pem
-ca.csr
-ca.pem
-encryption-config.yaml
-kube-controller-manager-csr.json
-kube-controller-manager-key.pem
-kube-controller-manager.csr
-kube-controller-manager.kubeconfig
-kube-controller-manager.pem
-kube-scheduler-csr.json
-kube-scheduler-key.pem
-kube-scheduler.csr
-kube-scheduler.kubeconfig
-kube-scheduler.pem
-kube-proxy-csr.json
-kube-proxy-key.pem
-kube-proxy.csr
-kube-proxy.kubeconfig
-kube-proxy.pem
-kubernetes-csr.json
-kubernetes-key.pem
-kubernetes.csr
-kubernetes.pem
-worker-0-csr.json
-worker-0-key.pem
-worker-0.csr
-worker-0.kubeconfig
-worker-0.pem
-worker-1-csr.json
-worker-1-key.pem
-worker-1.csr
-worker-1.kubeconfig
-worker-1.pem
-worker-2-csr.json
-worker-2-key.pem
-worker-2.csr
-worker-2.kubeconfig
-worker-2.pem
-service-account-key.pem
-service-account.csr
-service-account.pem
-service-account-csr.json
.idea
-ubuntu-bionic*.log
+.vscode
+ubuntu-jammy*.log
.vagrant
-temp
\ No newline at end of file
+temp
+*.crt
+*.key
+*.pem
+*.csr
+*csr.json
+*.kubeconfig
+quick-steps
+venv
\ No newline at end of file
diff --git a/README.md b/README.md
index 9627793..c75834c 100644
--- a/README.md
+++ b/README.md
@@ -11,8 +11,6 @@ Kubernetes The Hard Way is optimized for learning, which means taking the long r
This tutorial is a modified version of the original developed by [Kelsey Hightower](https://github.com/kelseyhightower/kubernetes-the-hard-way).
While the original one uses GCP as the platform to deploy kubernetes, we use VirtualBox and Vagrant to deploy a cluster on a local machine. If you prefer the cloud version, refer to the original one [here](https://github.com/kelseyhightower/kubernetes-the-hard-way)
-Another difference is that we use Docker instead of containerd. There are a few other differences to the original and they are documented [here](docs/differences-to-original.md)
-
> The results of this tutorial should not be viewed as production ready, and may receive limited support from the community, but don't let that stop you from learning!
## Target Audience
@@ -23,12 +21,12 @@ The target audience for this tutorial is someone planning to support a productio
Kubernetes The Hard Way guides you through bootstrapping a highly available Kubernetes cluster with end-to-end encryption between components and RBAC authentication.
-* [Kubernetes](https://github.com/kubernetes/kubernetes) 1.13.0
-* [Docker Container Runtime](https://github.com/containerd/containerd) 18.06
-* [CNI Container Networking](https://github.com/containernetworking/cni) 0.7.5
+* [Kubernetes](https://github.com/kubernetes/kubernetes) 1.24.3
+* [Container Runtime](https://github.com/containerd/containerd) 1.5.9
+* [CNI Container Networking](https://github.com/containernetworking/cni) 0.8.6
* [Weave Networking](https://www.weave.works/docs/net/latest/kubernetes/kube-addon/)
-* [etcd](https://github.com/coreos/etcd) v3.3.9
-* [CoreDNS](https://github.com/coredns/coredns) v1.2.2
+* [etcd](https://github.com/coreos/etcd) v3.5.3
+* [CoreDNS](https://github.com/coredns/coredns) v1.8.6
## Labs
@@ -40,13 +38,13 @@ Kubernetes The Hard Way guides you through bootstrapping a highly available Kube
* [Generating the Data Encryption Config and Key](docs/06-data-encryption-keys.md)
* [Bootstrapping the etcd Cluster](docs/07-bootstrapping-etcd.md)
* [Bootstrapping the Kubernetes Control Plane](docs/08-bootstrapping-kubernetes-controllers.md)
-* [Bootstrapping the Kubernetes Worker Nodes](docs/09-bootstrapping-kubernetes-workers.md)
-* [TLS Bootstrapping the Kubernetes Worker Nodes](docs/10-tls-bootstrapping-kubernetes-workers.md)
-* [Configuring kubectl for Remote Access](docs/11-configuring-kubectl.md)
-* [Deploy Weave - Pod Networking Solution](docs/12-configure-pod-networking.md)
-* [Kube API Server to Kubelet Configuration](docs/13-kube-apiserver-to-kubelet.md)
-* [Deploying the DNS Cluster Add-on](docs/14-dns-addon.md)
-* [Smoke Test](docs/15-smoke-test.md)
-* [E2E Test](docs/16-e2e-tests.md)
-* [Extra - Dynamic Kubelet Configuration](docs/17-extra-dynamic-kubelet-configuration.md)
+* [Installing CRI on Worker Nodes](docs/09-install-cri-workers.md)
+* [Bootstrapping the Kubernetes Worker Nodes](docs/10-bootstrapping-kubernetes-workers.md)
+* [TLS Bootstrapping the Kubernetes Worker Nodes](docs/11-tls-bootstrapping-kubernetes-workers.md)
+* [Configuring kubectl for Remote Access](docs/12-configuring-kubectl.md)
+* [Deploy Weave - Pod Networking Solution](docs/13-configure-pod-networking.md)
+* [Kube API Server to Kubelet Configuration](docs/14-kube-apiserver-to-kubelet.md)
+* [Deploying the DNS Cluster Add-on](docs/15-dns-addon.md)
+* [Smoke Test](docs/16-smoke-test.md)
+* [E2E Test](docs/17-e2e-tests.md)
* [Extra - Certificate Verification](docs/verify-certificates.md)
diff --git a/deployments/coredns.yaml b/deployments/coredns.yaml
index 23cdafd..807ef1b 100644
--- a/deployments/coredns.yaml
+++ b/deployments/coredns.yaml
@@ -4,7 +4,7 @@ metadata:
name: coredns
namespace: kube-system
---
-apiVersion: rbac.authorization.k8s.io/v1beta1
+apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
@@ -22,7 +22,7 @@ rules:
- list
- watch
---
-apiVersion: rbac.authorization.k8s.io/v1beta1
+apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
annotations:
@@ -48,14 +48,19 @@ data:
Corefile: |
.:53 {
errors
- health
+ health {
+ lameduck 5s
+ }
+ ready
kubernetes cluster.local in-addr.arpa ip6.arpa {
- pods insecure
- upstream
- fallthrough in-addr.arpa ip6.arpa
+ pods insecure
+ fallthrough in-addr.arpa ip6.arpa
+ ttl 30
}
prometheus :9153
- proxy . /etc/resolv.conf
+ forward . /etc/resolv.conf {
+ max_concurrent 1000
+ }
cache 30
loop
reload
@@ -92,7 +97,7 @@ spec:
operator: "Exists"
containers:
- name: coredns
- image: coredns/coredns:1.2.2
+ image: coredns/coredns:1.8.6
imagePullPolicy: IfNotPresent
resources:
limits:
diff --git a/docs/01-prerequisites.md b/docs/01-prerequisites.md
index 320c700..b9401c3 100644
--- a/docs/01-prerequisites.md
+++ b/docs/01-prerequisites.md
@@ -10,14 +10,14 @@
Download and Install [VirtualBox](https://www.virtualbox.org/wiki/Downloads) on any one of the supported platforms:
- Windows hosts
- - OS X hosts
+ - OS X hosts (x86 only, not M1)
- Linux distributions
- Solaris hosts
## Vagrant
Once VirtualBox is installed you may chose to deploy virtual machines manually on it.
-Vagrant provides an easier way to deploy multiple virtual machines on VirtualBox more consistenlty.
+Vagrant provides an easier way to deploy multiple virtual machines on VirtualBox more consistently.
Download and Install [Vagrant](https://www.vagrantup.com/) on your platform.
@@ -25,4 +25,58 @@ Download and Install [Vagrant](https://www.vagrantup.com/) on your platform.
- Debian
- Centos
- Linux
-- macOS
+- macOS (x86 only, not M1)
+
+This tutorial assumes that you have also installed Vagrant.
+
+
+## Lab Defaults
+
+The labs have been configured with the following networking defaults. If you change any of these after you have deployed any part of the lab, you'll need to completely reset it and start again from the beginning:
+
+```bash
+vagrant destroy -f
+vagrant up
+```
+
+If you do change any of these, please consider that a personal preference and don't submit a PR for it.
+
+### Virtual Machine Network
+
+The network used by the VirtualBox virtual machines is `192.168.56.0/24`.
+
+To change this, edit the [Vagrantfile](../vagrant/Vagrantfile) and set the new value for the network prefix at line 9. This should not overlap any of the other network settings.
+
+Note that you do not need to edit any of the other scripts to make the above change. It is all managed by shell variable computations based on the assigned VM IP addresses and the values in the hosts file (also computed).
+
+It is *recommended* that you leave the pod and service networks with the following defaults. If you change them then you will also need to edit one or both of the CoreDNS and Weave networking manifests to accommodate your change.
+
+### Pod Network
+
+The network used to assign IP addresses to pods is `10.244.0.0/16`.
+
+To change this, open all the `.md` files in the [docs](../docs/) directory in your favourite IDE and do a global replace on
+`POD_CIDR=10.244.0.0/16`
+with the new CIDR range. This should not overlap any of the other network settings.
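+
+For example, a hypothetical replacement using GNU `sed` from the repository root (`10.200.0.0/16` is just an example of a new range):
+
+```bash
+sed -i 's#POD_CIDR=10.244.0.0/16#POD_CIDR=10.200.0.0/16#g' docs/*.md
+```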
+
+### Service Network
+
+The network used to assign IP addresses to Cluster IP services is `10.96.0.0/16`.
+
+To change this, open all the `.md` files in the [docs](../docs/) directory in your favourite IDE and do a global replace on
+`SERVICE_CIDR=10.96.0.0/16`
+with the new CIDR range. This should not overlap any of the other network settings.
+
+Additionally edit line 164 of [coredns.yaml](../deployments/coredns.yaml) to set the new DNS service address (should still end with `.10`)
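+
+As an illustration only, both replacements could be scripted with GNU `sed` from the repository root, assuming the manifest currently uses the default DNS service address `10.96.0.10` (`10.100.0.0/16` is just an example range):
+
+```bash
+sed -i 's#SERVICE_CIDR=10.96.0.0/16#SERVICE_CIDR=10.100.0.0/16#g' docs/*.md
+sed -i 's#10.96.0.10#10.100.0.10#g' deployments/coredns.yaml
+```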
+
+## Running Commands in Parallel with tmux
+
+[tmux](https://github.com/tmux/tmux/wiki) can be used to run commands on multiple compute instances at the same time. Labs in this tutorial may require running the same commands across multiple compute instances. In those cases, consider using tmux and splitting a window into multiple panes with synchronize-panes enabled to speed up the provisioning process.
+
+> The use of tmux is optional and not required to complete this tutorial.
+
+
+
+> Enable synchronize-panes by pressing `CTRL+B` followed by `"` to split the window into two panes. In each pane (selectable with mouse), ssh to the host(s) you will be working with. Next type `CTRL+X` at the prompt to begin sync. In sync mode, the dividing line between panes will be red. Everything you type or paste in one pane will be echoed in the other.
+> To disable synchronization, type `CTRL+X` again. Note that the `CTRL+X` key binding is provided by a `.tmux.conf` loaded onto the VM by the vagrant provisioner.
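+
+As an aside, the same effect can be toggled from a shell inside the tmux session; a minimal illustration (not required by the labs):
+
+```bash
+# Run from a shell inside the tmux session.
+tmux set-window-option synchronize-panes on   # everything typed is sent to all panes
+tmux set-window-option synchronize-panes off  # back to typing in a single pane
+```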
+
+Next: [Compute Resources](02-compute-resources.md)
diff --git a/docs/02-compute-resources.md b/docs/02-compute-resources.md
index eb76199..407e468 100644
--- a/docs/02-compute-resources.md
+++ b/docs/02-compute-resources.md
@@ -4,15 +4,21 @@ Note: You must have VirtualBox and Vagrant configured at this point
Download this github repository and cd into the vagrant folder
-`git clone https://github.com/mmumshad/kubernetes-the-hard-way.git`
+```bash
+git clone https://github.com/mmumshad/kubernetes-the-hard-way.git
+```
CD into vagrant directory
-`cd kubernetes-the-hard-way\vagrant`
+```bash
+cd kubernetes-the-hard-way/vagrant
+```
Run Vagrant up
-`vagrant up`
+```bash
+vagrant up
+```
This does the below:
@@ -22,26 +28,24 @@ This does the below:
> If you choose to change these settings, please also update vagrant/ubuntu/vagrant/setup-hosts.sh
> to add the additional hosts to the /etc/hosts default before running "vagrant up".
-- Set's IP addresses in the range 192.168.5
+- Sets IP addresses in the range 192.168.56
- | VM | VM Name | Purpose | IP | Forwarded Port |
- | ------------ | ---------------------- |:-------------:| ------------:| ----------------:|
- | master-1 | kubernetes-ha-master-1 | Master | 192.168.5.11 | 2711 |
- | master-2 | kubernetes-ha-master-2 | Master | 192.168.5.12 | 2712 |
- | worker-1 | kubernetes-ha-worker-1 | Worker | 192.168.5.21 | 2721 |
- | worker-2 | kubernetes-ha-worker-2 | Worker | 192.168.5.22 | 2722 |
- | loadbalancer | kubernetes-ha-lb | LoadBalancer | 192.168.5.30 | 2730 |
+ | VM | VM Name | Purpose | IP | Forwarded Port | RAM |
+ | ------------ | ---------------------- |:-------------:| -------------:| ----------------:|-----:|
+ | master-1 | kubernetes-ha-master-1 | Master | 192.168.56.11 | 2711 | 2048 |
+ | master-2 | kubernetes-ha-master-2 | Master | 192.168.56.12 | 2712 | 1024 |
+ | worker-1 | kubernetes-ha-worker-1 | Worker | 192.168.56.21 | 2721 | 512 |
+ | worker-2 | kubernetes-ha-worker-2 | Worker | 192.168.56.22 | 2722 | 1024 |
+ | loadbalancer | kubernetes-ha-lb | LoadBalancer | 192.168.56.30 | 2730 | 1024 |
> These are the default settings. These can be changed in the Vagrant file
- Add's a DNS entry to each of the nodes to access internet
> DNS: 8.8.8.8
-- Install's Docker on Worker nodes
-- Runs the below command on all nodes to allow for network forwarding in IP Tables.
- This is required for kubernetes networking to function correctly.
- > sysctl net.bridge.bridge-nf-call-iptables=1
+- Sets required kernel settings for kubernetes networking to function correctly.
+See [Vagrant page](../vagrant/README.md) for details.
## SSH to the nodes
@@ -50,7 +54,7 @@ There are two ways to SSH into the nodes:
### 1. SSH using Vagrant
From the directory you ran the `vagrant up` command, run `vagrant ssh <vm>`, for example `vagrant ssh master-1`.
- > Note: Use VM field from the above table and not the vm name itself.
+ > Note: Use VM field from the above table and not the VM name itself.
### 2. SSH Using SSH Client Tools
@@ -61,30 +65,34 @@ Vagrant generates a private key for each of these VMs. It is placed under the .v
**Private Key Path:** `.vagrant/machines/<vm>/virtualbox/private_key`
-**Username:** `vagrant`
+**Username/Password:** `vagrant/vagrant`
## Verify Environment
- Ensure all VMs are up
- Ensure VMs are assigned the above IP addresses
-- Ensure you can SSH into these VMs using the IP and private keys
+- Ensure you can SSH into these VMs using the IP and private keys, or `vagrant ssh`
- Ensure the VMs can ping each other
-- Ensure the worker nodes have Docker installed on them. Version: 18.06
- > command `sudo docker version`
## Troubleshooting Tips
-1. If any of the VMs failed to provision, or is not configured correct, delete the vm using the command:
+### Failed Provisioning
-`vagrant destroy `
+If any of the VMs failed to provision, or is not configured correctly, delete the VM using the command:
-Then reprovision. Only the missing VMs will be re-provisioned
+```bash
+vagrant destroy <vm>
+```
-`vagrant up`
+Then re-provision. Only the missing VMs will be re-provisioned
+
+```bash
+vagrant up
+```
-Sometimes the delete does not delete the folder created for the vm and throws the below error.
+Sometimes the delete does not delete the folder created for the VM and throws an error similar to this:
VirtualBox error:
@@ -92,11 +100,38 @@ VirtualBox error:
VBoxManage.exe: error: Details: code E_FAIL (0x80004005), component SessionMachine, interface IMachine, callee IUnknown
VBoxManage.exe: error: Context: "SaveSettings()" at line 3105 of file VBoxManageModifyVM.cpp
-In such cases delete the VM, then delete the VM folder and then re-provision
+In such cases delete the VM, then delete the VM folder and then re-provision, e.g.
-`vagrant destroy `
+```bash
+vagrant destroy worker-2
+rmdir "\kubernetes-ha-worker-2"
+vagrant up
+```
-`rmdir "\kubernetes-ha-worker-2"`
+### Provisioner gets stuck
-`vagrant up`
+This will most likely happen at "Waiting for machine to reboot"
+1. Hit `CTRL+C`
+1. Kill any running `ruby` process, or Vagrant will complain.
+1. Destroy the VM that got stuck: `vagrant destroy <vm>`
+1. Re-provision. It will pick up where it left off: `vagrant up`
+
+# Pausing the Environment
+
+You do not need to complete the entire lab in one session. You may shut down and resume the environment as follows, if you need to power off your computer.
+
+To shut down (this will gracefully shut down all the VMs in the reverse order to which they were started):
+
+```bash
+vagrant halt
+```
+
+To power on again:
+
+```bash
+vagrant up
+```
+
+Prev: [Prerequisites](01-prerequisites.md)
+Next: [Client tools](03-client-tools.md)
\ No newline at end of file
diff --git a/docs/03-client-tools.md b/docs/03-client-tools.md
index df9e5d9..2b68809 100644
--- a/docs/03-client-tools.md
+++ b/docs/03-client-tools.md
@@ -2,30 +2,41 @@
First identify a system from where you will perform administrative tasks, such as creating certificates, kubeconfig files and distributing them to the different VMs.
-If you are on a Linux laptop, then your laptop could be this system. In my case I chose the master-1 node to perform administrative tasks. Whichever system you chose make sure that system is able to access all the provisioned VMs through SSH to copy files over.
+If you are on a Linux laptop, then your laptop could be this system. In my case I chose the `master-1` node to perform administrative tasks. Whichever system you choose, make sure that system is able to access all the provisioned VMs through SSH to copy files over.
## Access all VMs
-Generate Key Pair on master-1 node
-`$ssh-keygen`
+Here we create an SSH key pair for the `vagrant` user we are logged in as. We will copy the public key of this pair to the other master and both workers to permit password-less SSH (and SCP) from `master-1` to these other nodes in the context of the `vagrant` user, which exists on all nodes.
+
+Generate Key Pair on `master-1` node
+
+```bash
+ssh-keygen
+```
Leave all settings to default.
View the generated public key ID at:
-```
-$cat .ssh/id_rsa.pub
-ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQD......8+08b vagrant@master-1
+```bash
+cat ~/.ssh/id_rsa.pub
```
-Move public key of master to all other VMs
+Add this key to the local `authorized_keys` on `master-1`, as some commands `scp` files to this host as well.
+```bash
+cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
```
-$cat >> ~/.ssh/authorized_keys <<EOF
-ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQD......8+08b vagrant@master-1
-EOF

> output
```
-Client Version: version.Info{Major:"1", Minor:"13", GitVersion:"v1.13.0", GitCommit:"ddf47ac13c1a9483ea035a79cd7c10005ff21a6d", GitTreeState:"clean", BuildDate:"2018-12-03T21:04:45Z", GoVersion:"go1.11.2", Compiler:"gc", Platform:"linux/amd64"}
+kubectl version -o yaml
+clientVersion:
+ buildDate: "2022-07-13T14:30:46Z"
+ compiler: gc
+ gitCommit: aef86a93758dc3cb2c658dd9657ab4ad4afc21cb
+ gitTreeState: clean
+ gitVersion: v1.24.3
+ goVersion: go1.18.3
+ major: "1"
+ minor: "24"
+ platform: linux/amd64
+kustomizeVersion: v4.5.4
+
+The connection to the server localhost:8080 was refused - did you specify the right host or port?
```
+Don't worry about the error at the end as it is expected. We have not set anything up yet!
+
+Prev: [Compute Resources](02-compute-resources.md)
Next: [Certificate Authority](04-certificate-authority.md)
diff --git a/docs/04-certificate-authority.md b/docs/04-certificate-authority.md
index b84e2cd..e55c585 100644
--- a/docs/04-certificate-authority.md
+++ b/docs/04-certificate-authority.md
@@ -8,26 +8,46 @@ You can do these on any machine with `openssl` on it. But you should be able to
In our case we do it on the master-1 node, as we have set it up to be the administrative client.
+[//]: # (host:master-1)
## Certificate Authority
In this section you will provision a Certificate Authority that can be used to generate additional TLS certificates.
+Query the IP addresses of the hosts that we will insert as certificate subject alternative names (SANs). These are read from `/etc/hosts`. Doing it this way allows us to more easily change the VM network range from the default for these labs, which is `192.168.56.0/24`.
+
+```bash
+MASTER_1=$(dig +short master-1)
+MASTER_2=$(dig +short master-2)
+LOADBALANCER=$(dig +short loadbalancer)
+```
+
+Compute the cluster internal API server service address, which is always `.1` in the service CIDR range. This is also required as a SAN in the API server certificate.
+
+```bash
+SERVICE_CIDR=10.96.0.0/16
+API_SERVICE=$(echo $SERVICE_CIDR | awk 'BEGIN {FS="."} ; { printf("%s.%s.%s.1", $1, $2, $3) }')
+```
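+
+As a quick sanity check, the computed address should be the `.1` address of the service range:
+
+```bash
+echo $API_SERVICE
+# 10.96.0.1 with the default service range
+```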
+
+
+
Create a CA certificate, then generate a Certificate Signing Request and use it to create a private key:
-```
-# Create private key for CA
-openssl genrsa -out ca.key 2048
+```bash
+{
+ # Create private key for CA
+ openssl genrsa -out ca.key 2048
-# Comment line starting with RANDFILE in /etc/ssl/openssl.cnf definition to avoid permission issues
-sudo sed -i '0,/RANDFILE/{s/RANDFILE/\#&/}' /etc/ssl/openssl.cnf
+ # Comment line starting with RANDFILE in /etc/ssl/openssl.cnf definition to avoid permission issues
+ sudo sed -i '0,/RANDFILE/{s/RANDFILE/\#&/}' /etc/ssl/openssl.cnf
-# Create CSR using the private key
-openssl req -new -key ca.key -subj "/CN=KUBERNETES-CA" -out ca.csr
+ # Create CSR using the private key
+ openssl req -new -key ca.key -subj "/CN=KUBERNETES-CA/O=Kubernetes" -out ca.csr
-# Self sign the csr using its own private key
-openssl x509 -req -in ca.csr -signkey ca.key -CAcreateserial -out ca.crt -days 1000
+ # Self sign the csr using its own private key
+ openssl x509 -req -in ca.csr -signkey ca.key -CAcreateserial -out ca.crt -days 1000
+}
```
Results:
@@ -36,11 +56,11 @@ ca.crt
ca.key
```
-Reference : https://kubernetes.io/docs/concepts/cluster-administration/certificates/#openssl
+Reference : https://kubernetes.io/docs/tasks/administer-cluster/certificates/#openssl
The ca.crt is the Kubernetes Certificate Authority certificate and ca.key is the Kubernetes Certificate Authority private key.
You will use the ca.crt file in many places, so it will be copied to many places.
-The ca.key is used by the CA for signing certificates. And it should be securely stored. In this case our master node(s) is our CA server as well, so we will store it on master node(s). There is not need to copy this file to elsewhere.
+The ca.key is used by the CA for signing certificates. And it should be securely stored. In this case our master node(s) is our CA server as well, so we will store it on master node(s). There is no need to copy this file elsewhere.
## Client and Server Certificates
@@ -50,15 +70,17 @@ In this section you will generate client and server certificates for each Kubern
Generate the `admin` client certificate and private key:
-```
-# Generate private key for admin user
-openssl genrsa -out admin.key 2048
+```bash
+{
+ # Generate private key for admin user
+ openssl genrsa -out admin.key 2048
-# Generate CSR for admin user. Note the OU.
-openssl req -new -key admin.key -subj "/CN=admin/O=system:masters" -out admin.csr
+  # Generate CSR for admin user. Note the O (Organization) field, which sets the group to system:masters.
+ openssl req -new -key admin.key -subj "/CN=admin/O=system:masters" -out admin.csr
-# Sign certificate for admin user using CA servers private key
-openssl x509 -req -in admin.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out admin.crt -days 1000
+ # Sign certificate for admin user using CA servers private key
+ openssl x509 -req -in admin.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out admin.crt -days 1000
+}
```
Note that the admin user is part of the **system:masters** group. This is how we are able to perform any administrative operations on Kubernetes cluster using kubectl utility.
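+
+If you wish to confirm this, you can inspect the subject of the certificate just signed with openssl (output formatting varies slightly between OpenSSL versions):
+
+```bash
+openssl x509 -in admin.crt -noout -subject
+# subject=CN = admin, O = system:masters
+```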
@@ -81,10 +103,16 @@ For now let's just focus on the control plane components.
Generate the `kube-controller-manager` client certificate and private key:
-```
-openssl genrsa -out kube-controller-manager.key 2048
-openssl req -new -key kube-controller-manager.key -subj "/CN=system:kube-controller-manager" -out kube-controller-manager.csr
-openssl x509 -req -in kube-controller-manager.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out kube-controller-manager.crt -days 1000
+```bash
+{
+ openssl genrsa -out kube-controller-manager.key 2048
+
+ openssl req -new -key kube-controller-manager.key \
+ -subj "/CN=system:kube-controller-manager/O=system:kube-controller-manager" -out kube-controller-manager.csr
+
+ openssl x509 -req -in kube-controller-manager.csr \
+ -CA ca.crt -CAkey ca.key -CAcreateserial -out kube-controller-manager.crt -days 1000
+}
```
Results:
@@ -100,10 +128,16 @@ kube-controller-manager.crt
Generate the `kube-proxy` client certificate and private key:
-```
-openssl genrsa -out kube-proxy.key 2048
-openssl req -new -key kube-proxy.key -subj "/CN=system:kube-proxy" -out kube-proxy.csr
-openssl x509 -req -in kube-proxy.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out kube-proxy.crt -days 1000
+```bash
+{
+ openssl genrsa -out kube-proxy.key 2048
+
+ openssl req -new -key kube-proxy.key \
+ -subj "/CN=system:kube-proxy/O=system:node-proxier" -out kube-proxy.csr
+
+ openssl x509 -req -in kube-proxy.csr \
+ -CA ca.crt -CAkey ca.key -CAcreateserial -out kube-proxy.crt -days 1000
+}
```
Results:
@@ -119,10 +153,15 @@ Generate the `kube-scheduler` client certificate and private key:
-```
-openssl genrsa -out kube-scheduler.key 2048
-openssl req -new -key kube-scheduler.key -subj "/CN=system:kube-scheduler" -out kube-scheduler.csr
-openssl x509 -req -in kube-scheduler.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out kube-scheduler.crt -days 1000
+```bash
+{
+ openssl genrsa -out kube-scheduler.key 2048
+
+ openssl req -new -key kube-scheduler.key \
+ -subj "/CN=system:kube-scheduler/O=system:kube-scheduler" -out kube-scheduler.csr
+
+ openssl x509 -req -in kube-scheduler.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out kube-scheduler.crt -days 1000
+}
```
Results:
@@ -138,35 +177,43 @@ The kube-apiserver certificate requires all names that various components may re
The `openssl` command cannot take alternate names as command line parameter. So we must create a `conf` file for it:
-```
+```bash
cat > openssl.cnf < openssl-kubelet.cnf < openssl-etcd.cnf < Expected output
+
+```
+PKI generated correctly!
+```
+
+If there are any errors, please review above steps and then re-verify
## Distribute the Certificates
-Copy the appropriate certificates and private keys to each controller instance:
+Copy the appropriate certificates and private keys to each instance:
-```
+```bash
+{
for instance in master-1 master-2; do
scp ca.crt ca.key kube-apiserver.key kube-apiserver.crt \
+ apiserver-kubelet-client.crt apiserver-kubelet-client.key \
service-account.key service-account.crt \
etcd-server.key etcd-server.crt \
+ kube-controller-manager.key kube-controller-manager.crt \
+ kube-scheduler.key kube-scheduler.crt \
${instance}:~/
done
+
+for instance in worker-1 worker-2 ; do
+ scp ca.crt kube-proxy.crt kube-proxy.key ${instance}:~/
+done
+}
```
-> The `kube-proxy`, `kube-controller-manager`, `kube-scheduler`, and `kubelet` client certificates will be used to generate client authentication configuration files in the next lab. These certificates will be embedded into the client authentication configuration files. We will then copy those configuration files to the other master nodes.
+## Optional - Check Certificates
+At `master-1` and `master-2` nodes, run the following, selecting option 1
+
+```bash
+./cert_verify.sh
+```
+
+Prev: [Client tools](03-client-tools.md)
Next: [Generating Kubernetes Configuration Files for Authentication](05-kubernetes-configuration-files.md)
diff --git a/docs/05-kubernetes-configuration-files.md b/docs/05-kubernetes-configuration-files.md
index 4d945d8..02d92a4 100644
--- a/docs/05-kubernetes-configuration-files.md
+++ b/docs/05-kubernetes-configuration-files.md
@@ -1,6 +1,10 @@
# Generating Kubernetes Configuration Files for Authentication
-In this lab you will generate [Kubernetes configuration files](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/), also known as kubeconfigs, which enable Kubernetes clients to locate and authenticate to the Kubernetes API Servers.
+In this lab you will generate [Kubernetes configuration files](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/), also known as "kubeconfigs", which enable Kubernetes clients to locate and authenticate to the Kubernetes API Servers.
+
+Note: It is good practice to use file paths to certificates in kubeconfigs that will be used by the services. When certificates are updated, it is not necessary to regenerate the config files, as you would have to if the certificate data was embedded. Note also that the cert files don't exist in these paths yet - we will place them in later labs.
+
+User configs, like admin.kubeconfig will have the certificate info embedded within them.
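+
+As a hypothetical way to see the difference once the files below have been generated, `kubectl config view` shows paths for file-referenced certificates and redacts embedded data:
+
+```bash
+kubectl config view --kubeconfig=kube-proxy.kubeconfig   # certificate paths are shown
+kubectl config view --kubeconfig=admin.kubeconfig        # embedded certificate data appears as DATA+OMITTED
+```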
## Client Authentication Configs
@@ -8,28 +12,28 @@ In this section you will generate kubeconfig files for the `controller manager`,
### Kubernetes Public IP Address
-Each kubeconfig requires a Kubernetes API Server to connect to. To support high availability the IP address assigned to the load balancer will be used. In our case it is `192.168.5.30`
+Each kubeconfig requires a Kubernetes API Server to connect to. To support high availability the IP address assigned to the load balancer will be used, so let's first get the address of the loadbalancer into a shell variable such that we can use it in the kubeconfigs for services that run on worker nodes. The controller manager and scheduler need to talk to the local API server, hence they use the localhost address.
-```
-LOADBALANCER_ADDRESS=192.168.5.30
+[//]: # (host:master-1)
+
+```bash
+LOADBALANCER=$(dig +short loadbalancer)
```
### The kube-proxy Kubernetes Configuration File
Generate a kubeconfig file for the `kube-proxy` service:
-```
+```bash
{
kubectl config set-cluster kubernetes-the-hard-way \
- --certificate-authority=ca.crt \
- --embed-certs=true \
- --server=https://${LOADBALANCER_ADDRESS}:6443 \
+ --certificate-authority=/var/lib/kubernetes/pki/ca.crt \
+ --server=https://${LOADBALANCER}:6443 \
--kubeconfig=kube-proxy.kubeconfig
kubectl config set-credentials system:kube-proxy \
- --client-certificate=kube-proxy.crt \
- --client-key=kube-proxy.key \
- --embed-certs=true \
+ --client-certificate=/var/lib/kubernetes/pki/kube-proxy.crt \
+ --client-key=/var/lib/kubernetes/pki/kube-proxy.key \
--kubeconfig=kube-proxy.kubeconfig
kubectl config set-context default \
@@ -53,18 +57,16 @@ Reference docs for kube-proxy [here](https://kubernetes.io/docs/reference/comman
Generate a kubeconfig file for the `kube-controller-manager` service:
-```
+```bash
{
kubectl config set-cluster kubernetes-the-hard-way \
- --certificate-authority=ca.crt \
- --embed-certs=true \
+ --certificate-authority=/var/lib/kubernetes/pki/ca.crt \
--server=https://127.0.0.1:6443 \
--kubeconfig=kube-controller-manager.kubeconfig
kubectl config set-credentials system:kube-controller-manager \
- --client-certificate=kube-controller-manager.crt \
- --client-key=kube-controller-manager.key \
- --embed-certs=true \
+ --client-certificate=/var/lib/kubernetes/pki/kube-controller-manager.crt \
+ --client-key=/var/lib/kubernetes/pki/kube-controller-manager.key \
--kubeconfig=kube-controller-manager.kubeconfig
kubectl config set-context default \
@@ -88,18 +90,16 @@ Reference docs for kube-controller-manager [here](https://kubernetes.io/docs/ref
Generate a kubeconfig file for the `kube-scheduler` service:
-```
+```bash
{
kubectl config set-cluster kubernetes-the-hard-way \
- --certificate-authority=ca.crt \
- --embed-certs=true \
+ --certificate-authority=/var/lib/kubernetes/pki/ca.crt \
--server=https://127.0.0.1:6443 \
--kubeconfig=kube-scheduler.kubeconfig
kubectl config set-credentials system:kube-scheduler \
- --client-certificate=kube-scheduler.crt \
- --client-key=kube-scheduler.key \
- --embed-certs=true \
+ --client-certificate=/var/lib/kubernetes/pki/kube-scheduler.crt \
+ --client-key=/var/lib/kubernetes/pki/kube-scheduler.key \
--kubeconfig=kube-scheduler.kubeconfig
kubectl config set-context default \
@@ -123,7 +123,7 @@ Reference docs for kube-scheduler [here](https://kubernetes.io/docs/reference/co
Generate a kubeconfig file for the `admin` user:
-```
+```bash
{
kubectl config set-cluster kubernetes-the-hard-way \
--certificate-authority=ca.crt \
@@ -160,7 +160,7 @@ Reference docs for kubeconfig [here](https://kubernetes.io/docs/tasks/access-app
Copy the appropriate `kube-proxy` kubeconfig files to each worker instance:
-```
+```bash
for instance in worker-1 worker-2; do
scp kube-proxy.kubeconfig ${instance}:~/
done
@@ -168,10 +168,20 @@ done
Copy the appropriate `admin.kubeconfig`, `kube-controller-manager` and `kube-scheduler` kubeconfig files to each controller instance:
-```
+```bash
for instance in master-1 master-2; do
scp admin.kubeconfig kube-controller-manager.kubeconfig kube-scheduler.kubeconfig ${instance}:~/
done
```
+## Optional - Check kubeconfigs
+
+At `master-1` and `master-2` nodes, run the following, selecting option 2
+
+```bash
+./cert_verify.sh
+```
+
+
+Prev: [Certificate Authority](04-certificate-authority.md)
Next: [Generating the Data Encryption Config and Key](06-data-encryption-keys.md)
diff --git a/docs/06-data-encryption-keys.md b/docs/06-data-encryption-keys.md
index f1f155a..be5ab96 100644
--- a/docs/06-data-encryption-keys.md
+++ b/docs/06-data-encryption-keys.md
@@ -1,14 +1,16 @@
# Generating the Data Encryption Config and Key
-Kubernetes stores a variety of data including cluster state, application configurations, and secrets. Kubernetes supports the ability to [encrypt](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data) cluster data at rest.
+Kubernetes stores a variety of data including cluster state, application configurations, and secrets. Kubernetes supports the ability to [encrypt](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data) cluster data at rest, that is, the data stored within `etcd`.
In this lab you will generate an encryption key and an [encryption config](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/#understanding-the-encryption-at-rest-configuration) suitable for encrypting Kubernetes Secrets.
## The Encryption Key
+[//]: # (host:master-1)
+
Generate an encryption key:
-```
+```bash
ENCRYPTION_KEY=$(head -c 32 /dev/urandom | base64)
```
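+
+If you wish, verify that the key decodes back to the 32 random bytes we asked for (a quick sanity check, not part of the original steps):
+
+```bash
+echo $ENCRYPTION_KEY | base64 -d | wc -c
+# 32
+```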
@@ -16,7 +18,7 @@ ENCRYPTION_KEY=$(head -c 32 /dev/urandom | base64)
Create the `encryption-config.yaml` encryption config file:
-```
+```bash
cat > encryption-config.yaml <
Next: [Bootstrapping the etcd Cluster](07-bootstrapping-etcd.md)
diff --git a/docs/07-bootstrapping-etcd.md b/docs/07-bootstrapping-etcd.md
index 0fa2027..b3d813b 100644
--- a/docs/07-bootstrapping-etcd.md
+++ b/docs/07-bootstrapping-etcd.md
@@ -1,6 +1,6 @@
# Bootstrapping the etcd Cluster
-Kubernetes components are stateless and store cluster state in [etcd](https://github.com/coreos/etcd). In this lab you will bootstrap a two node etcd cluster and configure it for high availability and secure remote access.
+Kubernetes components are stateless and store cluster state in [etcd](https://etcd.io/). In this lab you will bootstrap a two-node etcd cluster and configure it for high availability and secure remote access.
## Prerequisites
@@ -14,46 +14,60 @@ The commands in this lab must be run on each controller instance: `master-1`, an
### Download and Install the etcd Binaries
-Download the official etcd release binaries from the [coreos/etcd](https://github.com/coreos/etcd) GitHub project:
+Download the official etcd release binaries from the [etcd](https://github.com/etcd-io/etcd) GitHub project:
-```
+[//]: # (host:master-1-master2)
+
+
+```bash
wget -q --show-progress --https-only --timestamping \
- "https://github.com/coreos/etcd/releases/download/v3.3.9/etcd-v3.3.9-linux-amd64.tar.gz"
+ "https://github.com/coreos/etcd/releases/download/v3.5.3/etcd-v3.5.3-linux-amd64.tar.gz"
```
Extract and install the `etcd` server and the `etcdctl` command line utility:
-```
+```bash
{
- tar -xvf etcd-v3.3.9-linux-amd64.tar.gz
- sudo mv etcd-v3.3.9-linux-amd64/etcd* /usr/local/bin/
+ tar -xvf etcd-v3.5.3-linux-amd64.tar.gz
+ sudo mv etcd-v3.5.3-linux-amd64/etcd* /usr/local/bin/
}
```
### Configure the etcd Server
-```
+Copy and secure certificates. Note that we place `ca.crt` in our main PKI directory and link it from etcd to not have multiple copies of the cert lying around.
+
+```bash
{
- sudo mkdir -p /etc/etcd /var/lib/etcd
- sudo cp ca.crt etcd-server.key etcd-server.crt /etc/etcd/
+ sudo mkdir -p /etc/etcd /var/lib/etcd /var/lib/kubernetes/pki
+ sudo cp etcd-server.key etcd-server.crt /etc/etcd/
+ sudo cp ca.crt /var/lib/kubernetes/pki/
+ sudo chown root:root /etc/etcd/*
+ sudo chmod 600 /etc/etcd/*
+ sudo chown root:root /var/lib/kubernetes/pki/*
+ sudo chmod 600 /var/lib/kubernetes/pki/*
+ sudo ln -s /var/lib/kubernetes/pki/ca.crt /etc/etcd/ca.crt
}
```
-The instance internal IP address will be used to serve client requests and communicate with etcd cluster peers. Retrieve the internal IP address of the master(etcd) nodes:
+The instance internal IP address will be used to serve client requests and communicate with etcd cluster peers.
+Retrieve the internal IP address of the current node, and also the addresses of master-1 and master-2 for the etcd cluster member list:
-```
+```bash
INTERNAL_IP=$(ip addr show enp0s8 | grep "inet " | awk '{print $2}' | cut -d / -f 1)
+MASTER_1=$(dig +short master-1)
+MASTER_2=$(dig +short master-2)
```
Each etcd member must have a unique name within an etcd cluster. Set the etcd name to match the hostname of the current compute instance:
-```
+```bash
ETCD_NAME=$(hostname -s)
```
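+
+Optionally confirm all four variables are populated before generating the unit file (the values depend on your environment):
+
+```bash
+echo "${ETCD_NAME} ${INTERNAL_IP} ${MASTER_1} ${MASTER_2}"
+```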
Create the `etcd.service` systemd unit file:
-```
+```bash
cat < output
```
-45bf9ccad8d8900a, started, master-2, https://192.168.5.12:2380, https://192.168.5.12:2379
-54a5796a6803f252, started, master-1, https://192.168.5.11:2380, https://192.168.5.11:2379
+45bf9ccad8d8900a, started, master-2, https://192.168.56.12:2380, https://192.168.56.12:2379
+54a5796a6803f252, started, master-1, https://192.168.56.11:2380, https://192.168.56.11:2379
```
Reference: https://kubernetes.io/docs/tasks/administer-cluster/configure-upgrade-etcd/#starting-etcd-clusters
+Prev: [Generating the Data Encryption Config and Key](06-data-encryption-keys.md)
Next: [Bootstrapping the Kubernetes Control Plane](08-bootstrapping-kubernetes-controllers.md)
diff --git a/docs/08-bootstrapping-kubernetes-controllers.md b/docs/08-bootstrapping-kubernetes-controllers.md
index ddbb30b..c98af75 100644
--- a/docs/08-bootstrapping-kubernetes-controllers.md
+++ b/docs/08-bootstrapping-kubernetes-controllers.md
@@ -2,39 +2,35 @@
In this lab you will bootstrap the Kubernetes control plane across 2 compute instances and configure it for high availability. You will also create an external load balancer that exposes the Kubernetes API Servers to remote clients. The following components will be installed on each node: Kubernetes API Server, Scheduler, and Controller Manager.
+Note that in a production-ready cluster it is recommended to have an odd number of master nodes, as leader election and quorum for multi-node services like etcd work better that way. See lecture on this ([KodeKloud](https://kodekloud.com/topic/etcd-in-ha/), [Udemy](https://www.udemy.com/course/certified-kubernetes-administrator-with-practice-tests/learn/lecture/14296192#overview)). We're only using two here to save on RAM on your workstation.
+
## Prerequisites
-The commands in this lab must be run on each controller instance: `master-1`, and `master-2`. Login to each controller instance using SSH Terminal. Example:
+The commands in this lab, up to the load balancer configuration section, must be run on each controller instance: `master-1` and `master-2`. Login to each controller instance using SSH Terminal.
-### Running commands in parallel with tmux
-
-[tmux](https://github.com/tmux/tmux/wiki) can be used to run commands on multiple compute instances at the same time. See the [Running commands in parallel with tmux](01-prerequisites.md#running-commands-in-parallel-with-tmux) section in the Prerequisites lab.
+You can perform this step with [tmux](01-prerequisites.md#running-commands-in-parallel-with-tmux)
## Provision the Kubernetes Control Plane
-Create the Kubernetes configuration directory:
-
-```
-sudo mkdir -p /etc/kubernetes/config
-```
+[//]: # (host:master-1-master2)
### Download and Install the Kubernetes Controller Binaries
Download the official Kubernetes release binaries:
-```
+```bash
wget -q --show-progress --https-only --timestamping \
- "https://storage.googleapis.com/kubernetes-release/release/v1.13.0/bin/linux/amd64/kube-apiserver" \
- "https://storage.googleapis.com/kubernetes-release/release/v1.13.0/bin/linux/amd64/kube-controller-manager" \
- "https://storage.googleapis.com/kubernetes-release/release/v1.13.0/bin/linux/amd64/kube-scheduler" \
- "https://storage.googleapis.com/kubernetes-release/release/v1.13.0/bin/linux/amd64/kubectl"
+ "https://storage.googleapis.com/kubernetes-release/release/v1.24.3/bin/linux/amd64/kube-apiserver" \
+ "https://storage.googleapis.com/kubernetes-release/release/v1.24.3/bin/linux/amd64/kube-controller-manager" \
+ "https://storage.googleapis.com/kubernetes-release/release/v1.24.3/bin/linux/amd64/kube-scheduler" \
+ "https://storage.googleapis.com/kubernetes-release/release/v1.24.3/bin/linux/amd64/kubectl"
```
-Reference: https://kubernetes.io/docs/setup/release/#server-binaries
+Reference: https://kubernetes.io/releases/download/#binaries
Install the Kubernetes binaries:
-```
+```bash
{
chmod +x kube-apiserver kube-controller-manager kube-scheduler kubectl
sudo mv kube-apiserver kube-controller-manager kube-scheduler kubectl /usr/local/bin/
@@ -43,32 +39,48 @@ Install the Kubernetes binaries:
### Configure the Kubernetes API Server
-```
-{
- sudo mkdir -p /var/lib/kubernetes/
+Place the key pairs into the kubernetes data directory and secure them:
- sudo cp ca.crt ca.key kube-apiserver.crt kube-apiserver.key \
- service-account.key service-account.crt \
- etcd-server.key etcd-server.crt \
- encryption-config.yaml /var/lib/kubernetes/
+```bash
+{
+ sudo mkdir -p /var/lib/kubernetes/pki
+
+ # Only copy CA keys as we'll need them again for workers.
+ sudo cp ca.crt ca.key /var/lib/kubernetes/pki
+ for c in kube-apiserver service-account apiserver-kubelet-client etcd-server kube-scheduler kube-controller-manager
+ do
+ sudo mv "$c.crt" "$c.key" /var/lib/kubernetes/pki/
+ done
+ sudo chown root:root /var/lib/kubernetes/pki/*
+ sudo chmod 600 /var/lib/kubernetes/pki/*
}
```
-The instance internal IP address will be used to advertise the API Server to members of the cluster. Retrieve the internal IP address for the current compute instance:
+The instance internal IP address will be used to advertise the API Server to members of the cluster. The load balancer IP address will be used as the external endpoint to the API servers.
+Retrieve these internal IP addresses:
-```
+```bash
INTERNAL_IP=$(ip addr show enp0s8 | grep "inet " | awk '{print $2}' | cut -d / -f 1)
+LOADBALANCER=$(dig +short loadbalancer)
```
-Verify it is set
+Get the IP addresses of the two master nodes, where the etcd servers are running:
+```bash
+MASTER_1=$(dig +short master-1)
+MASTER_2=$(dig +short master-2)
```
-echo $INTERNAL_IP
+
+CIDR ranges used *within* the cluster
+
+```bash
+POD_CIDR=10.244.0.0/16
+SERVICE_CIDR=10.96.0.0/16
```
Create the `kube-apiserver.service` systemd unit file:
-```
+```bash
cat < Output
+
```
+Warning: v1 ComponentStatus is deprecated in v1.19+
NAME STATUS MESSAGE ERROR
controller-manager Healthy ok
scheduler Healthy ok
@@ -218,16 +258,31 @@ In this section you will provision an external load balancer to front the Kubern
### Provision a Network Load Balancer
+An NLB operates at [layer 4](https://en.wikipedia.org/wiki/OSI_model#Layer_4:_Transport_layer) (TCP), meaning it passes the traffic straight through to the backend servers unfettered and does not interfere with the TLS process, leaving this to the Kube API servers.
+
Login to `loadbalancer` instance using SSH Terminal.
-```
+[//]: # (host:loadbalancer)
+
+
+```bash
sudo apt-get update && sudo apt-get install -y haproxy
```
+Read the IP addresses of the master nodes and this host into shell variables:
+
+```bash
+MASTER_1=$(dig +short master-1)
+MASTER_2=$(dig +short master-2)
+LOADBALANCER=$(dig +short loadbalancer)
```
-cat < output
@@ -258,15 +315,16 @@ curl https://192.168.5.30:6443/version -k
```
{
"major": "1",
- "minor": "13",
- "gitVersion": "v1.13.0",
- "gitCommit": "ddf47ac13c1a9483ea035a79cd7c10005ff21a6d",
+ "minor": "24",
+ "gitVersion": "v1.24.3",
+ "gitCommit": "aef86a93758dc3cb2c658dd9657ab4ad4afc21cb",
"gitTreeState": "clean",
- "buildDate": "2018-12-03T20:56:12Z",
- "goVersion": "go1.11.2",
+ "buildDate": "2022-07-13T14:23:26Z",
+ "goVersion": "go1.18.3",
"compiler": "gc",
"platform": "linux/amd64"
}
```
-Next: [Bootstrapping the Kubernetes Worker Nodes](09-bootstrapping-kubernetes-workers.md)
+Prev: [Bootstrapping the etcd Cluster](07-bootstrapping-etcd.md)
+Next: [Installing CRI on the Kubernetes Worker Nodes](09-install-cri-workers.md)
diff --git a/docs/09-install-cri-workers.md b/docs/09-install-cri-workers.md
new file mode 100644
index 0000000..d50220f
--- /dev/null
+++ b/docs/09-install-cri-workers.md
@@ -0,0 +1,81 @@
+# Installing CRI on the Kubernetes Worker Nodes
+
+In this lab you will install the Container Runtime Interface (CRI) on both worker nodes. CRI is a standard interface for the management of containers. Since v1.24 the use of dockershim has been fully deprecated and removed from the code base. [containerd replaces docker](https://kodekloud.com/blog/kubernetes-removed-docker-what-happens-now/) as the container runtime for Kubernetes, and it requires support from [CNI Plugins](https://github.com/containernetworking/plugins) to configure container networks, and [runc](https://github.com/opencontainers/runc) to actually do the job of running containers.
+
+Reference: https://github.com/containerd/containerd/blob/main/docs/getting-started.md
+
+### Download and Install Container Networking
+
+The commands in this lab must be run on each worker instance: `worker-1`, and `worker-2`. Login to each worker instance using SSH Terminal.
+
+[//]: # (host:worker-1-worker-2)
+
+You can perform this step with [tmux](01-prerequisites.md#running-commands-in-parallel-with-tmux)
+
+The versions chosen here align with those that are installed by the current `kubernetes-cni` package for a v1.24 cluster.
+
+```bash
+{
+ CONTAINERD_VERSION=1.5.9
+ CNI_VERSION=0.8.6
+ RUNC_VERSION=1.1.1
+
+ wget -q --show-progress --https-only --timestamping \
+ https://github.com/containerd/containerd/releases/download/v${CONTAINERD_VERSION}/containerd-${CONTAINERD_VERSION}-linux-amd64.tar.gz \
+ https://github.com/containernetworking/plugins/releases/download/v${CNI_VERSION}/cni-plugins-linux-amd64-v${CNI_VERSION}.tgz \
+ https://github.com/opencontainers/runc/releases/download/v${RUNC_VERSION}/runc.amd64
+
+ sudo mkdir -p /opt/cni/bin
+
+ sudo chmod +x runc.amd64
+ sudo mv runc.amd64 /usr/local/bin/runc
+
+ sudo tar -xzvf containerd-${CONTAINERD_VERSION}-linux-amd64.tar.gz -C /usr/local
+ sudo tar -xzvf cni-plugins-linux-amd64-v${CNI_VERSION}.tgz -C /opt/cni/bin
+}
+```
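+
+Optionally confirm the binaries landed where expected (the reported versions should match those set above):
+
+```bash
+containerd --version
+runc --version
+ls /opt/cni/bin
+```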
+
+Next create the `containerd` service unit.
+
+```bash
+cat <
+Next: [Bootstrapping the Kubernetes Worker Nodes](10-bootstrapping-kubernetes-workers.md)
diff --git a/docs/09-bootstrapping-kubernetes-workers.md b/docs/10-bootstrapping-kubernetes-workers.md
similarity index 58%
rename from docs/09-bootstrapping-kubernetes-workers.md
rename to docs/10-bootstrapping-kubernetes-workers.md
index 551620e..6d07408 100644
--- a/docs/09-bootstrapping-kubernetes-workers.md
+++ b/docs/10-bootstrapping-kubernetes-workers.md
@@ -1,6 +1,6 @@
# Bootstrapping the Kubernetes Worker Nodes
-In this lab you will bootstrap 2 Kubernetes worker nodes. We already have [Docker](https://www.docker.com) installed on these nodes.
+In this lab you will bootstrap 2 Kubernetes worker nodes. We already installed `containerd` and its dependencies on these nodes in the previous lab.
We will now install the kubernetes components
- [kubelet](https://kubernetes.io/docs/admin/kubelet)
@@ -8,7 +8,7 @@ We will now install the kubernetes components
## Prerequisites
-The Certificates and Configuration are created on `master-1` node and then copied over to workers using `scp`.
+The Certificates and Configuration are created on `master-1` node and then copied over to workers using `scp`.
Once this is done, the commands are to be run on first worker instance: `worker-1`. Login to first worker instance using SSH Terminal.
### Provisioning Kubelet Client Certificates
@@ -17,9 +17,15 @@ Kubernetes uses a [special-purpose authorization mode](https://kubernetes.io/doc
Generate a certificate and private key for one worker node:
-On master-1:
+On `master-1`:
+[//]: # (host:master-1)
+
+```bash
+WORKER_1=$(dig +short worker-1)
```
+
+```bash
cat > openssl-worker-1.cnf < Remember to run the above commands on worker node: `worker-1`
## Verification
-On master-1:
+
+[//]: # (host:master-1)
+
+Now return to the `master-1` node.
List the registered Kubernetes nodes from the master node:
-```
-master-1$ kubectl get nodes --kubeconfig admin.kubeconfig
+```bash
+kubectl get nodes --kubeconfig admin.kubeconfig
```
> output
```
NAME STATUS ROLES AGE VERSION
-worker-1 NotReady 93s v1.13.0
+worker-1 NotReady 93s v1.24.3
```
-> Note: It is OK for the worker node to be in a NotReady state.
- That is because we haven't configured Networking yet.
+The node is not ready as we have not yet installed pod networking. This comes later.
-Optional: At this point you may run the certificate verification script to make sure all certificates are configured correctly. Follow the instructions [here](verify-certificates.md)
-
-Next: [TLS Bootstrapping Kubernetes Workers](10-tls-bootstrapping-kubernetes-workers.md)
+Prev: [Installing CRI on the Kubernetes Worker Nodes](09-install-cri-workers.md)
+Next: [TLS Bootstrapping Kubernetes Workers](11-tls-bootstrapping-kubernetes-workers.md)
diff --git a/docs/10-tls-bootstrapping-kubernetes-workers.md b/docs/11-tls-bootstrapping-kubernetes-workers.md
similarity index 55%
rename from docs/10-tls-bootstrapping-kubernetes-workers.md
rename to docs/11-tls-bootstrapping-kubernetes-workers.md
index 48d1ec6..397b4af 100644
--- a/docs/10-tls-bootstrapping-kubernetes-workers.md
+++ b/docs/11-tls-bootstrapping-kubernetes-workers.md
@@ -18,7 +18,7 @@ This is not a practical approach when you have 1000s of nodes in the cluster, an
In Kubernetes 1.11 a patch was merged to require administrator or Controller approval of node serving CSRs for security reasons.
-Reference: https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping/#certificate-rotation
+Reference: https://kubernetes.io/docs/reference/access-authn-authz/kubelet-tls-bootstrapping/#certificate-rotation
So let's get started!
@@ -39,58 +39,26 @@ So let's get started!
--cluster-signing-key-file=/var/lib/kubernetes/ca.key
```
-> Note: We have already configured these in our setup in this course
-
-Copy the ca certificate to the worker node:
-
-
-## Step 1 Configure the Binaries on the Worker node
-
-### Download and Install Worker Binaries
-
-```
-worker-2$ wget -q --show-progress --https-only --timestamping \
- https://storage.googleapis.com/kubernetes-release/release/v1.13.0/bin/linux/amd64/kubectl \
- https://storage.googleapis.com/kubernetes-release/release/v1.13.0/bin/linux/amd64/kube-proxy \
- https://storage.googleapis.com/kubernetes-release/release/v1.13.0/bin/linux/amd64/kubelet
-```
-
-Reference: https://kubernetes.io/docs/setup/release/#node-binaries
-
-Create the installation directories:
-
-```
-worker-2$ sudo mkdir -p \
- /etc/cni/net.d \
- /opt/cni/bin \
- /var/lib/kubelet \
- /var/lib/kube-proxy \
- /var/lib/kubernetes \
- /var/run/kubernetes
-```
-
-Install the worker binaries:
-
-```
-{
- chmod +x kubectl kube-proxy kubelet
- sudo mv kubectl kube-proxy kubelet /usr/local/bin/
-}
-```
-### Move the ca certificate
-
-`worker-2$ sudo mv ca.crt /var/lib/kubernetes/`
+> Note: We have already configured these in lab 8 in this course
# Step 1 Create the Bootstrap Token to be used by Nodes (Kubelets) to invoke Certificate API
+[//]: # (host:master-1)
+
+Run the following steps on `master-1`
+
For the workers(kubelet) to access the Certificates API, they need to authenticate to the kubernetes api-server first. For this we create a [Bootstrap Token](https://kubernetes.io/docs/reference/access-authn-authz/bootstrap-tokens/) to be used by the kubelet
Bootstrap Tokens take the form of a 6 character token id followed by 16 character token secret separated by a dot. Eg: abcdef.0123456789abcdef. More formally, they must match the regular expression [a-z0-9]{6}\.[a-z0-9]{16}
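+
+The labs use a fixed example token (id `07401b`). Purely as an illustration of the format, a random token pair could be generated like this (not required for the labs):
+
+```bash
+TOKEN_ID=$(tr -dc 'a-z0-9' < /dev/urandom | head -c 6)
+TOKEN_SECRET=$(tr -dc 'a-z0-9' < /dev/urandom | head -c 16)
+echo "${TOKEN_ID}.${TOKEN_SECRET}"   # matches [a-z0-9]{6}\.[a-z0-9]{16}
+```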
+Set an expiration date for the bootstrap token of 7 days from now (you can adjust this)
-
+```bash
+EXPIRATION=$(date -u --date "+7 days" +"%Y-%m-%dT%H:%M:%SZ")
```
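+
+Optionally check the computed timestamp (the exact value depends on when you run it):
+
+```bash
+echo $EXPIRATION
+# e.g. 2022-08-06T14:30:00Z
+```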
-master-1$ cat > bootstrap-token-07401b.yaml < bootstrap-token-07401b.yaml < csrs-for-bootstrapping.yaml < csrs-for-bootstrapping.yaml < auto-approve-csrs-for-group.yaml < auto-approve-csrs-for-group.yaml < auto-approve-renewals-for-nodes.yaml < auto-approve-renewals-for-nodes.yaml < Note: We are not specifying the certificate details - tlsCertFile and tlsPrivateKeyFile - in this file
-## Step 6 Configure Kubelet Service
+## Step 8 Configure Kubelet Service
Create the `kubelet.service` systemd unit file:
-```
-worker-2$ cat < Remember to run the above commands on worker node: `worker-2`
+### Optional - Check Certificates and kubeconfigs
-## Step 9 Approve Server CSR
+At `worker-2` node, run the following, selecting option 5
-`master-1$ kubectl get csr`
-
-```
-NAME AGE REQUESTOR CONDITION
-csr-95bv6 20s system:node:worker-2 Pending
+```bash
+./cert_verify.sh
```
-Approve
+## Step 11 Approve Server CSR
+
+Now, go back to `master-1` and approve the pending kubelet-serving certificate
+
+[//]: # (host:master-1)
+[//]: # (comment:Please now manually approve the certificate before proceeding)
+
+```
+kubectl get csr --kubeconfig admin.kubeconfig
+```
+
+> Output - Note the name will be different, but it will begin with `csr-`
+
+```
+NAME AGE SIGNERNAME REQUESTOR REQUESTEDDURATION CONDITION
+csr-7k8nh 85s kubernetes.io/kubelet-serving system:node:worker-2 Pending
+csr-n7z8p 98s kubernetes.io/kube-apiserver-client-kubelet system:bootstrap:07401b Approved,Issued
+```
+
+Approve the pending certificate. Note that the certificate name `csr-7k8nh` will be different for you, and will change each time you run through the lab.
+
+```
+kubectl certificate approve csr-7k8nh --kubeconfig admin.kubeconfig
+```
-`master-1$ kubectl certificate approve csr-95bv6`
Note: In the event your cluster persists for longer than 365 days, you will need to manually approve the replacement CSR.
-Reference: https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping/#kubectl-approval
+Reference: https://kubernetes.io/docs/reference/access-authn-authz/kubelet-tls-bootstrapping/#kubectl-approval
## Verification
List the registered Kubernetes nodes from the master node:
-```
-master-1$ kubectl get nodes --kubeconfig admin.kubeconfig
+```bash
+kubectl get nodes --kubeconfig admin.kubeconfig
```
> output
```
-NAME STATUS ROLES AGE VERSION
-worker-1 NotReady 93s v1.13.0
-worker-2 NotReady 93s v1.13.0
+NAME STATUS ROLES AGE VERSION
+worker-1 NotReady 93s v1.24.3
+worker-2 NotReady 93s v1.24.3
```
-Note: It is OK for the worker node to be in a NotReady state. That is because we haven't configured Networking yet.
-Next: [Configuring Kubectl](11-configuring-kubectl.md)
+Prev: [Bootstrapping the Kubernetes Worker Nodes](10-bootstrapping-kubernetes-workers.md)
+Next: [Configuring Kubectl](12-configuring-kubectl.md)
diff --git a/docs/12-configure-pod-networking.md b/docs/12-configure-pod-networking.md
deleted file mode 100644
index c7e5e24..0000000
--- a/docs/12-configure-pod-networking.md
+++ /dev/null
@@ -1,44 +0,0 @@
-# Provisioning Pod Network
-
-We chose to use CNI - [weave](https://www.weave.works/docs/net/latest/kubernetes/kube-addon/) as our networking option.
-
-### Install CNI plugins
-
-Download the CNI Plugins required for weave on each of the worker nodes - `worker-1` and `worker-2`
-
-`wget https://github.com/containernetworking/plugins/releases/download/v0.7.5/cni-plugins-amd64-v0.7.5.tgz`
-
-Extract it to /opt/cni/bin directory
-
-`sudo tar -xzvf cni-plugins-amd64-v0.7.5.tgz --directory /opt/cni/bin/`
-
-Reference: https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/#cni
-
-### Deploy Weave Network
-
-Deploy weave network. Run only once on the `master` node.
-
-
-`kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')"`
-
-Weave uses POD CIDR of `10.32.0.0/12` by default.
-
-## Verification
-
-List the registered Kubernetes nodes from the master node:
-
-```
-master-1$ kubectl get pods -n kube-system
-```
-
-> output
-
-```
-NAME READY STATUS RESTARTS AGE
-weave-net-58j2j 2/2 Running 0 89s
-weave-net-rr5dk 2/2 Running 0 89s
-```
-
-Reference: https://kubernetes.io/docs/tasks/administer-cluster/network-policy-provider/weave-network-policy/#install-the-weave-net-addon
-
-Next: [Kube API Server to Kubelet Connectivity](13-kube-apiserver-to-kubelet.md)
diff --git a/docs/11-configuring-kubectl.md b/docs/12-configuring-kubectl.md
similarity index 74%
rename from docs/11-configuring-kubectl.md
rename to docs/12-configuring-kubectl.md
index 61d58e6..1852e4e 100644
--- a/docs/11-configuring-kubectl.md
+++ b/docs/12-configuring-kubectl.md
@@ -8,16 +8,25 @@ In this lab you will generate a kubeconfig file for the `kubectl` command line u
Each kubeconfig requires a Kubernetes API Server to connect to. To support high availability the IP address assigned to the external load balancer fronting the Kubernetes API Servers will be used.
+[//]: # (host:master-1)
+
+On `master-1`
+
+Get the kube-apiserver load balancer IP.
+
+```bash
+LOADBALANCER=$(dig +short loadbalancer)
+```
+
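+Optionally, confirm the API server is reachable through the load balancer before generating the kubeconfig. This is just a sanity check, not part of the original steps; the `/version` endpoint is normally served without authentication:
+
+```bash
+# should return the server version as JSON if anonymous access to /version is enabled (the default)
+curl --insecure https://${LOADBALANCER}:6443/version
+```
+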
Generate a kubeconfig file suitable for authenticating as the `admin` user:
-```
+```bash
{
- KUBERNETES_LB_ADDRESS=192.168.5.30
kubectl config set-cluster kubernetes-the-hard-way \
--certificate-authority=ca.crt \
--embed-certs=true \
- --server=https://${KUBERNETES_LB_ADDRESS}:6443
+ --server=https://${LOADBALANCER}:6443
kubectl config set-credentials admin \
--client-certificate=admin.crt \
@@ -44,6 +53,7 @@ kubectl get componentstatuses
> output
```
+Warning: v1 ComponentStatus is deprecated in v1.19+
NAME STATUS MESSAGE ERROR
controller-manager Healthy ok
scheduler Healthy ok
@@ -53,18 +63,17 @@ etcd-0 Healthy {"health":"true"}
List the nodes in the remote Kubernetes cluster:
-```
+```bash
kubectl get nodes
```
> output
```
-NAME STATUS ROLES AGE VERSION
-worker-1 NotReady 118s v1.13.0
-worker-2 NotReady 118s v1.13.0
+NAME STATUS ROLES AGE VERSION
+worker-1   NotReady   <none>   118s   v1.24.3
+worker-2   NotReady   <none>   118s   v1.24.3
```
-Note: It is OK for the worker node to be in a `NotReady` state. Worker nodes will come into `Ready` state once networking is configured.
-
-Next: [Deploy Pod Networking](12-configure-pod-networking.md)
+Prev: [TLS Bootstrapping Kubernetes Workers](11-tls-bootstrapping-kubernetes-workers.md)
+Next: [Deploy Pod Networking](13-configure-pod-networking.md)
diff --git a/docs/13-configure-pod-networking.md b/docs/13-configure-pod-networking.md
new file mode 100644
index 0000000..d6f7e6f
--- /dev/null
+++ b/docs/13-configure-pod-networking.md
@@ -0,0 +1,57 @@
+# Provisioning Pod Network
+
+Container Network Interface (CNI) is a standard interface for managing IP networks between containers across many nodes.
+
+We chose to use CNI - [weave](https://www.weave.works/docs/net/latest/kubernetes/kube-addon/) as our networking option.
+
+
+### Deploy Weave Network
+
+Deploy the weave network. Run this only once, on the `master-1` node. You will see a warning, but this is OK.
+
+[//]: # (host:master-1)
+
+On `master-1`
+
+```bash
+kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')"
+```
+
+Weave uses POD CIDR of `10.32.0.0/12` by default.
+
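+If the default range clashes with your local network, Weave's manifest URL accepts an `IPALLOC_RANGE` environment override. This is optional, and `10.244.0.0/16` below is only an example range:
+
+```bash
+# 10.244.0.0/16 is only an example - pick any range that does not overlap your LAN or service CIDR
+kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')&env.IPALLOC_RANGE=10.244.0.0/16"
+```
+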
+## Verification
+
+[//]: # (sleep:45)
+
+List the registered Kubernetes nodes from the master node:
+
+```bash
+kubectl get pods -n kube-system
+```
+
+> output
+
+```
+NAME READY STATUS RESTARTS AGE
+weave-net-58j2j 2/2 Running 0 89s
+weave-net-rr5dk 2/2 Running 0 89s
+```
+
+Once the Weave pods are fully running, which might take up to 60 seconds, the nodes should be ready.
+
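+If you would rather block until the rollout completes than poll manually, the following optional command waits for the Weave daemonset (named `weave-net` by the manifest applied above) to report ready:
+
+```bash
+# waits until all weave-net pods are up, or gives up after 90 seconds
+kubectl rollout status daemonset weave-net -n kube-system --timeout=90s
+```
+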
+```bash
+kubectl get nodes
+```
+
+> Output
+
+```
+NAME STATUS ROLES AGE VERSION
+worker-1   Ready    <none>   4m11s   v1.24.3
+worker-2   Ready    <none>   2m49s   v1.24.3
+```
+
+Reference: https://kubernetes.io/docs/tasks/administer-cluster/network-policy-provider/weave-network-policy/#install-the-weave-net-addon
+
+Prev: [Configuring Kubectl](12-configuring-kubectl.md)
+Next: [Kube API Server to Kubelet Connectivity](14-kube-apiserver-to-kubelet.md)
diff --git a/docs/13-kube-apiserver-to-kubelet.md b/docs/14-kube-apiserver-to-kubelet.md
similarity index 85%
rename from docs/13-kube-apiserver-to-kubelet.md
rename to docs/14-kube-apiserver-to-kubelet.md
index a8d05b9..e8bda6c 100644
--- a/docs/13-kube-apiserver-to-kubelet.md
+++ b/docs/14-kube-apiserver-to-kubelet.md
@@ -4,12 +4,14 @@ In this section you will configure RBAC permissions to allow the Kubernetes API
> This tutorial sets the Kubelet `--authorization-mode` flag to `Webhook`. Webhook mode uses the [SubjectAccessReview](https://kubernetes.io/docs/admin/authorization/#checking-api-access) API to determine authorization.
+[//]: # (host:master-1)
+
Create the `system:kube-apiserver-to-kubelet` [ClusterRole](https://kubernetes.io/docs/admin/authorization/rbac/#role-and-clusterrole) with permissions to access the Kubelet API and perform most common tasks associated with managing pods:
-```
+```bash
cat <
+Next: [DNS Addon](15-dns-addon.md)
diff --git a/docs/14-dns-addon.md b/docs/15-dns-addon.md
similarity index 74%
rename from docs/14-dns-addon.md
rename to docs/15-dns-addon.md
index 82841de..e634506 100644
--- a/docs/14-dns-addon.md
+++ b/docs/15-dns-addon.md
@@ -4,9 +4,13 @@ In this lab you will deploy the [DNS add-on](https://kubernetes.io/docs/concepts
## The DNS Cluster Add-on
+[//]: # (host:master-1)
+
Deploy the `coredns` cluster add-on:
-```
+Note that if you have [changed the service CIDR range](./01-prerequisites.md#service-network), this manifest must be edited to match. In that case, save your edited copy onto `master-1` (paste into vi, then save) and apply that instead.
+
+```bash
kubectl apply -f https://raw.githubusercontent.com/mmumshad/kubernetes-the-hard-way/master/deployments/coredns.yaml
```
@@ -23,7 +27,9 @@ service/kube-dns created
List the pods created by the `kube-dns` deployment:
-```
+[//]: # (sleep:15)
+
+```bash
kubectl get pods -l k8s-app=kube-dns -n kube-system
```
@@ -39,15 +45,18 @@ Reference: https://kubernetes.io/docs/tasks/administer-cluster/coredns/#installi
## Verification
-Create a `busybox` deployment:
+Create a `busybox` pod:
-```
-kubectl run --generator=run-pod/v1 busybox --image=busybox:1.28 --command -- sleep 3600
+```bash
+kubectl run busybox --image=busybox:1.28 --command -- sleep 3600
```
-List the pod created by the `busybox` deployment:
+[//]: # (sleep:10)
-```
+
+List the `busybox` pod:
+
+```bash
kubectl get pods -l run=busybox
```
@@ -60,7 +69,7 @@ busybox-bd8fb7cbd-vflm9 1/1 Running 0 10s
Execute a DNS lookup for the `kubernetes` service inside the `busybox` pod:
-```
+```bash
kubectl exec -ti busybox -- nslookup kubernetes
```
@@ -74,4 +83,5 @@ Name: kubernetes
Address 1: 10.96.0.1 kubernetes.default.svc.cluster.local
```
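+
+As an additional, optional check, you can resolve the DNS service itself by its fully qualified name from the same pod:
+
+```bash
+kubectl exec -ti busybox -- nslookup kube-dns.kube-system.svc.cluster.local
+```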
-Next: [Smoke Test](15-smoke-test.md)
+Prev: [Kube API Server to Kubelet Connectivity](14-kube-apiserver-to-kubelet.md)
+Next: [Smoke Test](16-smoke-test.md)
diff --git a/docs/16-e2e-tests.md b/docs/16-e2e-tests.md
deleted file mode 100644
index a844d32..0000000
--- a/docs/16-e2e-tests.md
+++ /dev/null
@@ -1,41 +0,0 @@
-# Run End-to-End Tests
-
-Install Go
-
-```
-wget https://dl.google.com/go/go1.15.linux-amd64.tar.gz
-
-sudo tar -C /usr/local -xzf go1.15.linux-amd64.tar.gz
-export GOPATH="/home/vagrant/go"
-export PATH=$PATH:/usr/local/go/bin:$GOPATH/bin
-```
-
-## Install kubetest
-
-```
-git clone https://github.com/kubernetes/test-infra.git
-cd test-infra/
-GO111MODULE=on go install ./kubetest
-```
-
-> Note: This may take a few minutes depending on your network speed
-
-## Use the version specific to your cluster
-
-```
-K8S_VERSION=$(kubectl version -o json | jq -r '.serverVersion.gitVersion')
-export KUBERNETES_CONFORMANCE_TEST=y
-export KUBECONFIG="$HOME/.kube/config"
-
-
-
-kubetest --provider=skeleton --test --test_args=”--ginkgo.focus=\[Conformance\]” --extract ${K8S_VERSION} | tee test.out
-
-```
-
-
-This could take about 1.5 to 2 hours. The number of tests run and passed will be displayed at the end.
-
-
-
-Next: [Dynamic Kubelet configuration](17-extra-dynamic-kubelet-configuration.md)
\ No newline at end of file
diff --git a/docs/15-smoke-test.md b/docs/16-smoke-test.md
similarity index 92%
rename from docs/15-smoke-test.md
rename to docs/16-smoke-test.md
index a6f265c..b0788dc 100644
--- a/docs/15-smoke-test.md
+++ b/docs/16-smoke-test.md
@@ -4,18 +4,20 @@ In this lab you will complete a series of tasks to ensure your Kubernetes cluste
## Data Encryption
+[//]: # (host:master-1)
+
In this section you will verify the ability to [encrypt secret data at rest](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/#verifying-that-data-is-encrypted).
Create a generic secret:
-```
+```bash
kubectl create secret generic kubernetes-the-hard-way \
--from-literal="mykey=mydata"
```
Print a hexdump of the `kubernetes-the-hard-way` secret stored in etcd:
-```
+```bash
sudo ETCDCTL_API=3 etcdctl get \
--endpoints=https://127.0.0.1:2379 \
--cacert=/etc/etcd/ca.crt \
@@ -48,7 +50,9 @@ sudo ETCDCTL_API=3 etcdctl get \
The etcd key should be prefixed with `k8s:enc:aescbc:v1:key1`, which indicates the `aescbc` provider was used to encrypt the data with the `key1` encryption key.
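+
+To check the prefix programmatically rather than reading the hexdump by eye, the same etcd key can be fetched and grepped. This assumes the same endpoint and certificate paths as the hexdump command above; it should print `k8s:enc:aescbc:v1:key1` if encryption at rest is active:
+
+```bash
+# certificate paths below should match those used in the hexdump command above
+sudo ETCDCTL_API=3 etcdctl get \
+  --endpoints=https://127.0.0.1:2379 \
+  --cacert=/etc/etcd/ca.crt \
+  --cert=/etc/etcd/etcd-server.crt \
+  --key=/etc/etcd/etcd-server.key \
+  /registry/secrets/default/kubernetes-the-hard-way | grep -ao "k8s:enc:aescbc:v1:key1"
+```
+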
Cleanup:
-`kubectl delete secret kubernetes-the-hard-way`
+```bash
+kubectl delete secret kubernetes-the-hard-way
+```
## Deployments
@@ -56,13 +60,15 @@ In this section you will verify the ability to create and manage [Deployments](h
Create a deployment for the [nginx](https://nginx.org/en/) web server:
+```bash
+kubectl create deployment nginx --image=nginx:1.23.1
```
-kubectl create deployment nginx --image=nginx
-```
+
+[//]: # (sleep:15)
List the pod created by the `nginx` deployment:
-```
+```bash
kubectl get pods -l app=nginx
```
@@ -79,18 +85,18 @@ In this section you will verify the ability to access applications remotely usin
Create a service to expose deployment nginx on node ports.
-```
+```bash
kubectl expose deploy nginx --type=NodePort --port 80
```
-```
+```bash
PORT_NUMBER=$(kubectl get svc -l app=nginx -o jsonpath="{.items[0].spec.ports[0].nodePort}")
```
Test to view NGINX page
-```
+```bash
curl http://worker-1:$PORT_NUMBER
curl http://worker-2:$PORT_NUMBER
```
@@ -112,13 +118,13 @@ In this section you will verify the ability to [retrieve container logs](https:/
Retrieve the full name of the `nginx` pod:
-```
+```bash
POD_NAME=$(kubectl get pods -l app=nginx -o jsonpath="{.items[0].metadata.name}")
```
Print the `nginx` pod logs:
-```
+```bash
kubectl logs $POD_NAME
```
@@ -135,14 +141,15 @@ In this section you will verify the ability to [execute commands in a container]
Print the nginx version by executing the `nginx -v` command in the `nginx` container:
-```
+```bash
kubectl exec -ti $POD_NAME -- nginx -v
```
> output
```
-nginx version: nginx/1.15.9
+nginx version: nginx/1.23.1
```
-Next: [End to End Tests](16-e2e-tests.md)
+Prev: [DNS Addon](15-dns-addon.md)
+Next: [End to End Tests](17-e2e-tests.md)
diff --git a/docs/17-e2e-tests.md b/docs/17-e2e-tests.md
new file mode 100644
index 0000000..e7492a8
--- /dev/null
+++ b/docs/17-e2e-tests.md
@@ -0,0 +1,40 @@
+# Run End-to-End Tests
+
+## Install Go
+
+```bash
+wget https://dl.google.com/go/go1.18.linux-amd64.tar.gz
+
+sudo tar -C /usr/local -xzf go1.18.linux-amd64.tar.gz
+```
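+
+Optionally, confirm the toolchain extracted correctly before moving on. The binary is not on `PATH` yet, so call it by its full path:
+
+```bash
+/usr/local/go/bin/go version
+```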
+
+## Install kubetest
+
+```bash
+git clone --depth 1 https://github.com/kubernetes/test-infra.git
+cd test-infra/kubetest
+export GOPATH="$HOME/go"
+export PATH=$PATH:/usr/local/go/bin:$GOPATH/bin
+go build
+```
+
+> Note: it will take a while to build as it has many dependencies.
+
+
+## Use the version specific to your cluster
+
+```bash
+sudo apt install jq -y
+```
+
+```bash
+K8S_VERSION=$(kubectl version -o json | jq -r '.serverVersion.gitVersion')
+export KUBERNETES_CONFORMANCE_TEST=y
+export KUBECONFIG="$HOME/.kube/config"
+
+./kubetest --provider=skeleton --test --test_args="--ginkgo.focus=\[Conformance\]" --extract ${K8S_VERSION} | tee test.out
+```
+
+This could take *18 hours or more*! There are several thousand tests in the suite. The number of tests run and passed will be displayed at the end. Expect some failures, as the suite includes tests for features our cluster does not support, e.g. mounting persistent volumes using NFS.
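+
+If you only want a quick signal rather than the full conformance run, the ginkgo focus expression can be narrowed. The pattern below is only an illustration; any regular expression that matches test descriptions will work:
+
+```bash
+# the focus regex is just an example - substitute any test name pattern you care about
+./kubetest --provider=skeleton --test \
+  --test_args="--ginkgo.focus=Kubectl.*\[Conformance\]" \
+  --extract ${K8S_VERSION} | tee test-focused.out
+```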
+
+Prev: [Smoke Test](16-smoke-test.md)
\ No newline at end of file
diff --git a/docs/17-extra-dynamic-kubelet-configuration.md b/docs/17-extra-dynamic-kubelet-configuration.md
deleted file mode 100644
index ddc1ccf..0000000
--- a/docs/17-extra-dynamic-kubelet-configuration.md
+++ /dev/null
@@ -1,58 +0,0 @@
-# Dynamic Kubelet Configuration
-
-`sudo apt install -y jq`
-
-
-```
-NODE_NAME="worker-1"; curl -sSL "https://localhost:6443/api/v1/nodes/${NODE_NAME}/proxy/configz" -k --cert admin.crt --key admin.key | jq '.kubeletconfig|.kind="KubeletConfiguration"|.apiVersion="kubelet.config.k8s.io/v1beta1"' > kubelet_configz_${NODE_NAME}
-```
-
-```
-kubectl -n kube-system create configmap nodes-config --from-file=kubelet=kubelet_configz_${NODE_NAME} --append-hash -o yaml
-```
-
-Edit `worker-1` node to use the dynamically created configuration
-```
-master-1# kubectl edit node worker-1
-```
-
-Add the following YAML bit under `spec`:
-```
-configSource:
- configMap:
- name: CONFIG_MAP_NAME # replace CONFIG_MAP_NAME with the name of the ConfigMap
- namespace: kube-system
- kubeletConfigKey: kubelet
-```
-
-Configure Kubelet Service
-
-Create the `kubelet.service` systemd unit file:
-
-```
-cat <\d+)')
+comment_rx = re.compile(r'^\[//\]:\s\#\s\((?P\w+):(?P[^\)]+)\)')
+choice_rx = re.compile(r'^\s*-+\s+OR\s+-+')
+script_begin = '```bash'
+script_end = '```'
+script_open = ('{' + newline).encode('utf-8')
+script_close = '}'.encode('utf-8')
+current_host = None
+
+def write_script(filename: str, script: list):
+ path = os.path.join(qs_path, filename)
+ with open(path, "wb") as f:
+ f.write(script_open)
+ f.write(newline.join(script).encode('utf-8'))
+ f.write(script_close)
+ print(f'-> {path}')
+
+
+for doc in glob.glob(os.path.join(docs_path, '*.md')):
+ print(doc)
+ script = []
+ state = State.NONE
+ ignore_next_script = False
+ m = file_number_rx.search(os.path.basename(doc))
+ if not m:
+ continue
+ file_no = m['number']
+ section = 0
+ output_file = None
+ with codecs.open(doc, "r", encoding='utf-8') as f:
+ for line in f.readlines():
+ line = line.rstrip()
+ if state == State.NONE:
+ m = comment_rx.search(line)
+ if m:
+ token = m['token']
+ value = m['value']
+ if token == 'host':
+ if script:
+ write_script(output_file, script)
+ script = []
+ output_file = os.path.join(qs_path, f'{file_no}{chr(97 + section)}-{value}.sh')
+ section += 1
+ elif token == 'sleep':
+ script.extend([
+ f'echo "Sleeping {value}s"',
+ f'sleep {value}',
+ newline
+ ])
+ elif token == 'comment':
+ script.extend([
+ '#######################################################################',
+ '#',
+ f'# {value}',
+ '#',
+ '#######################################################################',
+ newline
+ ])
+ elif line == script_begin:
+ state = State.SCRIPT
+ elif choice_rx.match(line):
+ ignore_next_script = True
+ elif state == State.SCRIPT:
+ if line == script_end:
+ state = State.NONE
+ script.append(newline)
+ ignore_next_script = False
+ elif not (ignore_next_script or line == '{' or line == '}'):
+ script.append(line)
+ if output_file and script:
+ write_script(output_file, script)
+
diff --git a/vagrant/README.md b/vagrant/README.md
new file mode 100644
index 0000000..a9bff19
--- /dev/null
+++ b/vagrant/README.md
@@ -0,0 +1,20 @@
+# Vagrant
+
+This directory contains the configuration for the virtual machines we will use for the installation.
+
+A few prerequisites are handled by the VM provisioning steps.
+
+## Kernel Settings
+
+1. Disable cgroups v2. I found that Kubernetes currently doesn't play nicely with cgroups v2, so we set a kernel boot parameter in GRUB to switch back to v1.
+1. Load the `br_netfilter` kernel module, which permits kube-proxy to manipulate iptables rules.
+1. Set the tunables `net.bridge.bridge-nf-call-iptables=1` and `net.ipv4.ip_forward=1`, which are also required for pod networking to work. A rough sketch of these steps is shown after this list.
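+
+The sketch below is illustrative only and shows roughly what the module and sysctl steps amount to. The actual provisioning, including the cgroup GRUB change and the reboot, is done by `ubuntu/setup-kernel.sh`:
+
+```bash
+# illustrative only - see ubuntu/setup-kernel.sh for the real provisioning steps
+sudo modprobe br_netfilter
+echo br_netfilter | sudo tee /etc/modules-load.d/br_netfilter.conf
+sudo sysctl -w net.bridge.bridge-nf-call-iptables=1
+sudo sysctl -w net.ipv4.ip_forward=1
+```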
+
+## DNS settings
+
+1. Set the default DNS server to Google's public DNS, which we know always works.
+1. Set up `/etc/hosts` so that all the VMs can resolve each other
+
+## Other settings
+
+1. Install configs for `vim` and `tmux` on master-1
diff --git a/vagrant/Vagrantfile b/vagrant/Vagrantfile
index 5490ab5..a8cba18 100644
--- a/vagrant/Vagrantfile
+++ b/vagrant/Vagrantfile
@@ -6,11 +6,37 @@
NUM_MASTER_NODE = 2
NUM_WORKER_NODE = 2
-IP_NW = "192.168.5."
+IP_NW = "192.168.56."
MASTER_IP_START = 10
NODE_IP_START = 20
LB_IP_START = 30
+# Sets up hosts file and DNS
+def setup_dns(node)
+ # Set up /etc/hosts
+ node.vm.provision "setup-hosts", :type => "shell", :path => "ubuntu/vagrant/setup-hosts.sh" do |s|
+ s.args = ["enp0s8", node.vm.hostname]
+ end
+ # Set up DNS resolution
+ node.vm.provision "setup-dns", type: "shell", :path => "ubuntu/update-dns.sh"
+end
+
+# Runs provisioning steps that are required by masters and workers
+def provision_kubernetes_node(node)
+ # Set up kernel parameters, modules and tunables
+ node.vm.provision "setup-kernel", :type => "shell", :path => "ubuntu/setup-kernel.sh"
+ # Restart
+ node.vm.provision :shell do |shell|
+ shell.privileged = true
+ shell.inline = "echo Rebooting"
+ shell.reboot = true
+ end
+ # Set up DNS
+ setup_dns node
+ # Install cert verification script
+ node.vm.provision "shell", inline: "ln -s /vagrant/ubuntu/cert_verify.sh /home/vagrant/cert_verify.sh"
+end
+
# All Vagrant configuration is done below. The "2" in Vagrant.configure
# configures the configuration version (we support older styles for
# backwards compatibility). Please don't change it unless you know what
@@ -23,99 +49,63 @@ Vagrant.configure("2") do |config|
# Every Vagrant development environment requires a box. You can search for
# boxes at https://vagrantcloud.com/search.
# config.vm.box = "base"
- config.vm.box = "ubuntu/bionic64"
+ config.vm.box = "ubuntu/jammy64"
# Disable automatic box update checking. If you disable this, then
# boxes will only be checked for updates when the user runs
# `vagrant box outdated`. This is not recommended.
config.vm.box_check_update = false
- # Create a public network, which generally matched to bridged network.
- # Bridged networks make the machine appear as another physical device on
- # your network.
- # config.vm.network "public_network"
-
- # Share an additional folder to the guest VM. The first argument is
- # the path on the host to the actual folder. The second argument is
- # the path on the guest to mount the folder. And the optional third
- # argument is a set of non-required options.
- # config.vm.synced_folder "../data", "/vagrant_data"
-
- # Provider-specific configuration so you can fine-tune various
- # backing providers for Vagrant. These expose provider-specific options.
- # Example for VirtualBox:
- #
- # config.vm.provider "virtualbox" do |vb|
- # # Customize the amount of memory on the VM:
- # vb.memory = "1024"
- # end
- #
- # View the documentation for the provider you are using for more
- # information on available options.
-
# Provision Master Nodes
(1..NUM_MASTER_NODE).each do |i|
- config.vm.define "master-#{i}" do |node|
- # Name shown in the GUI
- node.vm.provider "virtualbox" do |vb|
- vb.name = "kubernetes-ha-master-#{i}"
- vb.memory = 2048
- vb.cpus = 2
+ config.vm.define "master-#{i}" do |node|
+ # Name shown in the GUI
+ node.vm.provider "virtualbox" do |vb|
+ vb.name = "kubernetes-ha-master-#{i}"
+ if i == 1
+ vb.memory = 2048 # More needed to run e2e tests at end
+ else
+ vb.memory = 1024
end
- node.vm.hostname = "master-#{i}"
- node.vm.network :private_network, ip: IP_NW + "#{MASTER_IP_START + i}"
- node.vm.network "forwarded_port", guest: 22, host: "#{2710 + i}"
-
- node.vm.provision "setup-hosts", :type => "shell", :path => "ubuntu/vagrant/setup-hosts.sh" do |s|
- s.args = ["enp0s8"]
- end
-
- node.vm.provision "setup-dns", type: "shell", :path => "ubuntu/update-dns.sh"
- node.vm.provision "file", source: "./ubuntu/cert_verify.sh", destination: "$HOME/"
-
+ vb.cpus = 2
end
+ node.vm.hostname = "master-#{i}"
+ node.vm.network :private_network, ip: IP_NW + "#{MASTER_IP_START + i}"
+ node.vm.network "forwarded_port", guest: 22, host: "#{2710 + i}"
+ provision_kubernetes_node node
+ if i == 1
+        # Install (opinionated) configs for vim and tmux on master-1. These are used by the author for the CKA exam.
+ node.vm.provision "file", source: "./ubuntu/tmux.conf", destination: "$HOME/.tmux.conf"
+ node.vm.provision "file", source: "./ubuntu/vimrc", destination: "$HOME/.vimrc"
+ end
+ end
end
# Provision Load Balancer Node
config.vm.define "loadbalancer" do |node|
node.vm.provider "virtualbox" do |vb|
- vb.name = "kubernetes-ha-lb"
- vb.memory = 512
- vb.cpus = 1
+ vb.name = "kubernetes-ha-lb"
+ vb.memory = 512
+ vb.cpus = 1
end
node.vm.hostname = "loadbalancer"
node.vm.network :private_network, ip: IP_NW + "#{LB_IP_START}"
- node.vm.network "forwarded_port", guest: 22, host: 2730
-
- node.vm.provision "setup-hosts", :type => "shell", :path => "ubuntu/vagrant/setup-hosts.sh" do |s|
- s.args = ["enp0s8"]
- end
-
- node.vm.provision "setup-dns", type: "shell", :path => "ubuntu/update-dns.sh"
-
+ node.vm.network "forwarded_port", guest: 22, host: 2730
+ setup_dns node
end
# Provision Worker Nodes
(1..NUM_WORKER_NODE).each do |i|
config.vm.define "worker-#{i}" do |node|
- node.vm.provider "virtualbox" do |vb|
- vb.name = "kubernetes-ha-worker-#{i}"
- vb.memory = 512
- vb.cpus = 1
- end
- node.vm.hostname = "worker-#{i}"
- node.vm.network :private_network, ip: IP_NW + "#{NODE_IP_START + i}"
- node.vm.network "forwarded_port", guest: 22, host: "#{2720 + i}"
-
- node.vm.provision "setup-hosts", :type => "shell", :path => "ubuntu/vagrant/setup-hosts.sh" do |s|
- s.args = ["enp0s8"]
- end
-
- node.vm.provision "setup-dns", type: "shell", :path => "ubuntu/update-dns.sh"
- node.vm.provision "install-docker", type: "shell", :path => "ubuntu/install-docker-2.sh"
- node.vm.provision "allow-bridge-nf-traffic", :type => "shell", :path => "ubuntu/allow-bridge-nf-traffic.sh"
- node.vm.provision "file", source: "./ubuntu/cert_verify.sh", destination: "$HOME/"
-
+ node.vm.provider "virtualbox" do |vb|
+ vb.name = "kubernetes-ha-worker-#{i}"
+ vb.memory = 1024
+ vb.cpus = 1
+ end
+ node.vm.hostname = "worker-#{i}"
+ node.vm.network :private_network, ip: IP_NW + "#{NODE_IP_START + i}"
+ node.vm.network "forwarded_port", guest: 22, host: "#{2720 + i}"
+ provision_kubernetes_node node
end
end
end
diff --git a/vagrant/ubuntu/allow-bridge-nf-traffic.sh b/vagrant/ubuntu/allow-bridge-nf-traffic.sh
deleted file mode 100644
index 0a36b18..0000000
--- a/vagrant/ubuntu/allow-bridge-nf-traffic.sh
+++ /dev/null
@@ -1,2 +0,0 @@
-#!/bin/bash
-sysctl net.bridge.bridge-nf-call-iptables=1
diff --git a/vagrant/ubuntu/cert_verify.sh b/vagrant/ubuntu/cert_verify.sh
index dc104e0..9ed634c 100755
--- a/vagrant/ubuntu/cert_verify.sh
+++ b/vagrant/ubuntu/cert_verify.sh
@@ -4,19 +4,23 @@ set -e
# Green & Red marking for Success and Failed messages
SUCCESS='\033[0;32m'
-FAILED='\033[0;31m'
+FAILED='\033[0;31;1m'
NC='\033[0m'
-# All Cert Location
+# IP addresses
+INTERNAL_IP=$(ip addr show enp0s8 | grep "inet " | awk '{print $2}' | cut -d / -f 1)
+MASTER_1=$(dig +short master-1)
+MASTER_2=$(dig +short master-2)
+WORKER_1=$(dig +short worker-1)
+WORKER_2=$(dig +short worker-2)
+LOADBALANCER=$(dig +short loadbalancer)
+LOCALHOST="127.0.0.1"
+# All Cert Location
# ca certificate location
CACERT=ca.crt
CAKEY=ca.key
-# admin certificate location
-ADMINCERT=admin.crt
-ADMINKEY=admin.key
-
# Kube controller manager certificate location
KCMCERT=kube-controller-manager.crt
KCMKEY=kube-controller-manager.key
@@ -91,294 +95,70 @@ SYSTEMD_WORKER_1_KP=/etc/systemd/system/kube-proxy.service
# Function - Master node #
-check_cert_ca()
+check_cert_and_key()
{
- if [ -z $CACERT ] && [ -z $CAKEY ]
+ local name=$1
+ local subject=$2
+ local issuer=$3
+ local nokey=
+ local cert="${CERT_LOCATION}/$1.crt"
+ local key="${CERT_LOCATION}/$1.key"
+
+ if [ -z $cert -o -z $key ]
then
- printf "${FAILED}please specify cert and key location\n"
+ printf "${FAILED}cert and/or key not present in ${CERT_LOCATION}. Perhaps you missed a copy step\n${NC}"
exit 1
- elif [ -f $CACERT ] && [ -f $CAKEY ]
+ elif [ -f $cert -a -f $key ]
then
- printf "${NC}CA cert and key found, verifying the authenticity\n"
- CACERT_SUBJECT=$(openssl x509 -in $CACERT -text | grep "Subject: CN"| tr -d " ")
- CACERT_ISSUER=$(openssl x509 -in $CACERT -text | grep "Issuer: CN"| tr -d " ")
- CACERT_MD5=$(openssl x509 -noout -modulus -in $CACERT | openssl md5| awk '{print $2}')
- CAKEY_MD5=$(openssl rsa -noout -modulus -in $CAKEY | openssl md5| awk '{print $2}')
- if [ $CACERT_SUBJECT == "Subject:CN=KUBERNETES-CA" ] && [ $CACERT_ISSUER == "Issuer:CN=KUBERNETES-CA" ] && [ $CACERT_MD5 == $CAKEY_MD5 ]
+ printf "${NC}${name} cert and key found, verifying the authenticity\n"
+ CERT_SUBJECT=$(sudo openssl x509 -in $cert -text | grep "Subject: CN"| tr -d " ")
+ CERT_ISSUER=$(sudo openssl x509 -in $cert -text | grep "Issuer: CN"| tr -d " ")
+ CERT_MD5=$(sudo openssl x509 -noout -modulus -in $cert | openssl md5| awk '{print $2}')
+ KEY_MD5=$(sudo openssl rsa -noout -modulus -in $key | openssl md5| awk '{print $2}')
+ if [ $CERT_SUBJECT == "${subject}" ] && [ $CERT_ISSUER == "${issuer}" ] && [ $CERT_MD5 == $KEY_MD5 ]
then
- printf "${SUCCESS}CA cert and key are correct\n"
+ printf "${SUCCESS}${name} cert and key are correct\n${NC}"
else
- printf "${FAILED}Exiting...Found mismtach in the CA certificate and keys, More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/04-certificate-authority.md#certificate-authority\n"
+            printf "${FAILED}Exiting...Found mismatch in the ${name} certificate and keys, More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/04-certificate-authority.md#certificate-authority\n${NC}"
exit 1
fi
else
- printf "${FAILED}ca.crt / ca.key is missing. More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/04-certificate-authority.md#certificate-authority\n"
+ printf "${FAILED}${cert} / ${key} is missing. More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/04-certificate-authority.md#certificate-authority\n"
+      echo "These should be in /var/lib/kubernetes/pki (most certs), /etc/etcd (etcd server certs) or /var/lib/kubelet (kubelet certs)${NC}"
exit 1
fi
}
-
-check_cert_admin()
+check_cert_only()
{
- if [ -z $ADMINCERT ] && [ -z $ADMINKEY ]
+ local name=$1
+ local subject=$2
+ local issuer=$3
+ local cert="${CERT_LOCATION}/$1.crt"
+
+ # Worker-2 auto cert is a .pem
+ [ -f "${CERT_LOCATION}/$1.pem" ] && cert="${CERT_LOCATION}/$1.pem"
+
+ if [ -z $cert ]
then
- printf "${FAILED}please specify cert and key location\n"
+ printf "${FAILED}cert not present in ${CERT_LOCATION}. Perhaps you missed a copy step\n${NC}"
exit 1
- elif [ -f $ADMINCERT ] && [ -f $ADMINKEY ]
+ elif [ -f $cert ]
then
- printf "${NC}admin cert and key found, verifying the authenticity\n"
- ADMINCERT_SUBJECT=$(openssl x509 -in $ADMINCERT -text | grep "Subject: CN"| tr -d " ")
- ADMINCERT_ISSUER=$(openssl x509 -in $ADMINCERT -text | grep "Issuer: CN"| tr -d " ")
- ADMINCERT_MD5=$(openssl x509 -noout -modulus -in $ADMINCERT | openssl md5| awk '{print $2}')
- ADMINKEY_MD5=$(openssl rsa -noout -modulus -in $ADMINKEY | openssl md5| awk '{print $2}')
- if [ $ADMINCERT_SUBJECT == "Subject:CN=admin,O=system:masters" ] && [ $ADMINCERT_ISSUER == "Issuer:CN=KUBERNETES-CA" ] && [ $ADMINCERT_MD5 == $ADMINKEY_MD5 ]
+ printf "${NC}${name} cert found, verifying the authenticity\n"
+ CERT_SUBJECT=$(sudo openssl x509 -in $cert -text | grep "Subject: "| tr -d " ")
+ CERT_ISSUER=$(sudo openssl x509 -in $cert -text | grep "Issuer: CN"| tr -d " ")
+ CERT_MD5=$(sudo openssl x509 -noout -modulus -in $cert | openssl md5| awk '{print $2}')
+ if [ $CERT_SUBJECT == "${subject}" ] && [ $CERT_ISSUER == "${issuer}" ]
then
- printf "${SUCCESS}admin cert and key are correct\n"
+ printf "${SUCCESS}${name} cert is correct\n${NC}"
else
- printf "${FAILED}Exiting...Found mismtach in the admin certificate and keys, More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/04-certificate-authority.md#the-admin-client-certificate\n"
+            printf "${FAILED}Exiting...Found mismatch in the ${name} certificate, More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/04-certificate-authority.md#certificate-authority\n${NC}"
exit 1
fi
else
- printf "${FAILED}admin.crt / admin.key is missing. More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/04-certificate-authority.md#the-admin-client-certificate\n"
- exit 1
- fi
-}
-
-check_cert_kcm()
-{
- if [ -z $KCMCERT ] && [ -z $KCMKEY ]
- then
- printf "${FAILED}please specify cert and key location\n"
- exit 1
- elif [ -f $KCMCERT ] && [ -f $KCMKEY ]
- then
- printf "${NC}kube-controller-manager cert and key found, verifying the authenticity\n"
- KCMCERT_SUBJECT=$(openssl x509 -in $KCMCERT -text | grep "Subject: CN"| tr -d " ")
- KCMCERT_ISSUER=$(openssl x509 -in $KCMCERT -text | grep "Issuer: CN"| tr -d " ")
- KCMCERT_MD5=$(openssl x509 -noout -modulus -in $KCMCERT | openssl md5| awk '{print $2}')
- KCMKEY_MD5=$(openssl rsa -noout -modulus -in $KCMKEY | openssl md5| awk '{print $2}')
- if [ $KCMCERT_SUBJECT == "Subject:CN=system:kube-controller-manager" ] && [ $KCMCERT_ISSUER == "Issuer:CN=KUBERNETES-CA" ] && [ $KCMCERT_MD5 == $KCMKEY_MD5 ]
- then
- printf "${SUCCESS}kube-controller-manager cert and key are correct\n"
- else
- printf "${FAILED}Exiting...Found mismtach in the kube-controller-manager certificate and keys, More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/04-certificate-authority.md#the-controller-manager-client-certificate\n"
- exit 1
- fi
- else
- printf "${FAILED}kube-controller-manager.crt / kube-controller-manager.key is missing. More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/04-certificate-authority.md#the-controller-manager-client-certificate\n"
- exit 1
- fi
-}
-
-check_cert_kp()
-{
- if [ -z $KPCERT ] && [ -z $KPKEY ]
- then
- printf "${FAILED}please specify cert and key location\n"
- exit 1
- elif [ -f $KPCERT ] && [ -f $KPKEY ]
- then
- printf "${NC}kube-proxy cert and key found, verifying the authenticity\n"
- KPCERT_SUBJECT=$(openssl x509 -in $KPCERT -text | grep "Subject: CN"| tr -d " ")
- KPCERT_ISSUER=$(openssl x509 -in $KPCERT -text | grep "Issuer: CN"| tr -d " ")
- KPCERT_MD5=$(openssl x509 -noout -modulus -in $KPCERT | openssl md5| awk '{print $2}')
- KPKEY_MD5=$(openssl rsa -noout -modulus -in $KPKEY | openssl md5| awk '{print $2}')
- if [ $KPCERT_SUBJECT == "Subject:CN=system:kube-proxy" ] && [ $KPCERT_ISSUER == "Issuer:CN=KUBERNETES-CA" ] && [ $KPCERT_MD5 == $KPKEY_MD5 ]
- then
- printf "${SUCCESS}kube-proxy cert and key are correct\n"
- else
- printf "${FAILED}Exiting...Found mismtach in the kube-proxy certificate and keys, More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/04-certificate-authority.md#the-kube-proxy-client-certificate\n"
- exit 1
- fi
- else
- printf "${FAILED}kube-proxy.crt / kube-proxy.key is missing. More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/04-certificate-authority.md#the-kube-proxy-client-certificate\n"
- exit 1
- fi
-}
-
-check_cert_ks()
-{
- if [ -z $KSCERT ] && [ -z $KSKEY ]
- then
- printf "${FAILED}please specify cert and key location\n"
- exit 1
- elif [ -f $KSCERT ] && [ -f $KSKEY ]
- then
- printf "${NC}kube-scheduler cert and key found, verifying the authenticity\n"
- KSCERT_SUBJECT=$(openssl x509 -in $KSCERT -text | grep "Subject: CN"| tr -d " ")
- KSCERT_ISSUER=$(openssl x509 -in $KSCERT -text | grep "Issuer: CN"| tr -d " ")
- KSCERT_MD5=$(openssl x509 -noout -modulus -in $KSCERT | openssl md5| awk '{print $2}')
- KSKEY_MD5=$(openssl rsa -noout -modulus -in $KSKEY | openssl md5| awk '{print $2}')
- if [ $KSCERT_SUBJECT == "Subject:CN=system:kube-scheduler" ] && [ $KSCERT_ISSUER == "Issuer:CN=KUBERNETES-CA" ] && [ $KSCERT_MD5 == $KSKEY_MD5 ]
- then
- printf "${SUCCESS}kube-scheduler cert and key are correct\n"
- else
- printf "${FAILED}Exiting...Found mismtach in the kube-scheduler certificate and keys, More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/04-certificate-authority.md#the-scheduler-client-certificate\n"
- exit 1
- fi
- else
- printf "${FAILED}kube-scheduler.crt / kube-scheduler.key is missing. More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/04-certificate-authority.md#the-scheduler-client-certificate\n"
- exit 1
- fi
-}
-
-check_cert_api()
-{
- if [ -z $APICERT ] && [ -z $APIKEY ]
- then
- printf "${FAILED}please specify kube-api cert and key location, Exiting....\n"
- exit 1
- elif [ -f $APICERT ] && [ -f $APIKEY ]
- then
- printf "${NC}kube-apiserver cert and key found, verifying the authenticity\n"
- APICERT_SUBJECT=$(openssl x509 -in $APICERT -text | grep "Subject: CN"| tr -d " ")
- APICERT_ISSUER=$(openssl x509 -in $APICERT -text | grep "Issuer: CN"| tr -d " ")
- APICERT_MD5=$(openssl x509 -noout -modulus -in $APICERT | openssl md5| awk '{print $2}')
- APIKEY_MD5=$(openssl rsa -noout -modulus -in $APIKEY | openssl md5| awk '{print $2}')
- if [ $APICERT_SUBJECT == "Subject:CN=kube-apiserver" ] && [ $APICERT_ISSUER == "Issuer:CN=KUBERNETES-CA" ] && [ $APICERT_MD5 == $APIKEY_MD5 ]
- then
- printf "${SUCCESS}kube-apiserver cert and key are correct\n"
- else
- printf "${FAILED}Exiting...Found mismtach in the kube-apiserver certificate and keys, More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/04-certificate-authority.md#the-kubernetes-api-server-certificate\n"
- exit 1
- fi
- else
- printf "${FAILED}kube-apiserver.crt / kube-apiserver.key is missing. More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/04-certificate-authority.md#the-kubernetes-api-server-certificate\n"
- exit 1
- fi
-}
-
-check_cert_etcd()
-{
- if [ -z $ETCDCERT ] && [ -z $ETCDKEY ]
- then
- printf "${FAILED}please specify ETCD cert and key location, Exiting....\n"
- exit 1
- elif [ -f $ETCDCERT ] && [ -f $ETCDKEY ]
- then
- printf "${NC}ETCD cert and key found, verifying the authenticity\n"
- ETCDCERT_SUBJECT=$(openssl x509 -in $ETCDCERT -text | grep "Subject: CN"| tr -d " ")
- ETCDCERT_ISSUER=$(openssl x509 -in $ETCDCERT -text | grep "Issuer: CN"| tr -d " ")
- ETCDCERT_MD5=$(openssl x509 -noout -modulus -in $ETCDCERT | openssl md5| awk '{print $2}')
- ETCDKEY_MD5=$(openssl rsa -noout -modulus -in $ETCDKEY | openssl md5| awk '{print $2}')
- if [ $ETCDCERT_SUBJECT == "Subject:CN=etcd-server" ] && [ $ETCDCERT_ISSUER == "Issuer:CN=KUBERNETES-CA" ] && [ $ETCDCERT_MD5 == $ETCDKEY_MD5 ]
- then
- printf "${SUCCESS}etcd-server.crt / etcd-server.key are correct\n"
- else
- printf "${FAILED}Exiting...Found mismtach in the ETCD certificate and keys, More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/04-certificate-authority.md#the-etcd-server-certificate\n"
- exit 1
- fi
- else
- printf "${FAILED}etcd-server.crt / etcd-server.key is missing. More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/04-certificate-authority.md#the-etcd-server-certificate\n"
- exit 1
- fi
-}
-
-check_cert_sa()
-{
- if [ -z $SACERT ] && [ -z $SAKEY ]
- then
- printf "${FAILED}please specify Service Account cert and key location, Exiting....\n"
- exit 1
- elif [ -f $SACERT ] && [ -f $SAKEY ]
- then
- printf "${NC}service account cert and key found, verifying the authenticity\n"
- SACERT_SUBJECT=$(openssl x509 -in $SACERT -text | grep "Subject: CN"| tr -d " ")
- SACERT_ISSUER=$(openssl x509 -in $SACERT -text | grep "Issuer: CN"| tr -d " ")
- SACERT_MD5=$(openssl x509 -noout -modulus -in $SACERT | openssl md5| awk '{print $2}')
- SAKEY_MD5=$(openssl rsa -noout -modulus -in $SAKEY | openssl md5| awk '{print $2}')
- if [ $SACERT_SUBJECT == "Subject:CN=service-accounts" ] && [ $SACERT_ISSUER == "Issuer:CN=KUBERNETES-CA" ] && [ $SACERT_MD5 == $SAKEY_MD5 ]
- then
- printf "${SUCCESS}Service Account cert and key are correct\n"
- else
- printf "${FAILED}Exiting...Found mismtach in the Service Account certificate and keys, More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/04-certificate-authority.md#the-service-account-key-pair\n"
- exit 1
- fi
- else
- printf "${FAILED}service-account.crt / service-account.key is missing. More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/04-certificate-authority.md#the-service-account-key-pair\n"
- exit 1
- fi
-}
-
-
-check_cert_kpkubeconfig()
-{
- if [ -z $KPKUBECONFIG ]
- then
- printf "${FAILED}please specify kube-proxy kubeconfig location\n"
- exit 1
- elif [ -f $KPKUBECONFIG ]
- then
- printf "${NC}kube-proxy kubeconfig file found, verifying the authenticity\n"
- KPKUBECONFIG_SUBJECT=$(cat $KPKUBECONFIG | grep "client-certificate-data:" | awk '{print $2}' | base64 --decode | openssl x509 -text | grep "Subject: CN" | tr -d " ")
- KPKUBECONFIG_ISSUER=$(cat $KPKUBECONFIG | grep "client-certificate-data:" | awk '{print $2}' | base64 --decode | openssl x509 -text | grep "Issuer: CN" | tr -d " ")
- KPKUBECONFIG_CERT_MD5=$(cat $KPKUBECONFIG | grep "client-certificate-data:" | awk '{print $2}' | base64 --decode | openssl x509 -noout | openssl md5 | awk '{print $2}')
- KPKUBECONFIG_KEY_MD5=$(cat $KPKUBECONFIG | grep "client-key-data" | awk '{print $2}' | base64 --decode | openssl rsa -noout | openssl md5 | awk '{print $2}')
- KPKUBECONFIG_SERVER=$(cat $KPKUBECONFIG | grep "server:"| awk '{print $2}')
- if [ $KPKUBECONFIG_SUBJECT == "Subject:CN=system:kube-proxy" ] && [ $KPKUBECONFIG_ISSUER == "Issuer:CN=KUBERNETES-CA" ] && [ $KPKUBECONFIG_CERT_MD5 == $KPKUBECONFIG_KEY_MD5 ] && [ $KPKUBECONFIG_SERVER == "https://192.168.5.30:6443" ]
- then
- printf "${SUCCESS}kube-proxy kubeconfig cert and key are correct\n"
- else
- printf "${FAILED}Exiting...Found mismtach in the kube-proxy kubeconfig certificate and keys, More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/05-kubernetes-configuration-files.md#the-kube-proxy-kubernetes-configuration-file\n"
- exit 1
- fi
- else
- printf "${FAILED}kube-proxy kubeconfig file is missing. More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/05-kubernetes-configuration-files.md#the-kube-proxy-kubernetes-configuration-file\n"
- exit 1
- fi
-}
-
-check_cert_kcmkubeconfig()
-{
- if [ -z $KCMKUBECONFIG ]
- then
- printf "${FAILED}please specify kube-controller-manager kubeconfig location\n"
- exit 1
- elif [ -f $KCMKUBECONFIG ]
- then
- printf "${NC}kube-controller-manager kubeconfig file found, verifying the authenticity\n"
- KCMKUBECONFIG_SUBJECT=$(cat $KCMKUBECONFIG | grep "client-certificate-data:" | awk '{print $2}' | base64 --decode | openssl x509 -text | grep "Subject: CN" | tr -d " ")
- KCMKUBECONFIG_ISSUER=$(cat $KCMKUBECONFIG | grep "client-certificate-data:" | awk '{print $2}' | base64 --decode | openssl x509 -text | grep "Issuer: CN" | tr -d " ")
- KCMKUBECONFIG_CERT_MD5=$(cat $KCMKUBECONFIG | grep "client-certificate-data:" | awk '{print $2}' | base64 --decode | openssl x509 -noout | openssl md5 | awk '{print $2}')
- KCMKUBECONFIG_KEY_MD5=$(cat $KCMKUBECONFIG | grep "client-key-data" | awk '{print $2}' | base64 --decode | openssl rsa -noout | openssl md5 | awk '{print $2}')
- KCMKUBECONFIG_SERVER=$(cat $KCMKUBECONFIG | grep "server:"| awk '{print $2}')
- if [ $KCMKUBECONFIG_SUBJECT == "Subject:CN=system:kube-controller-manager" ] && [ $KCMKUBECONFIG_ISSUER == "Issuer:CN=KUBERNETES-CA" ] && [ $KCMKUBECONFIG_CERT_MD5 == $KCMKUBECONFIG_KEY_MD5 ] && [ $KCMKUBECONFIG_SERVER == "https://127.0.0.1:6443" ]
- then
- printf "${SUCCESS}kube-controller-manager kubeconfig cert and key are correct\n"
- else
- printf "${FAILED}Exiting...Found mismtach in the kube-controller-manager kubeconfig certificate and keys, More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/05-kubernetes-configuration-files.md#the-kube-controller-manager-kubernetes-configuration-file\n"
- exit 1
- fi
- else
- printf "${FAILED}kube-controller-manager kubeconfig file is missing. More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/05-kubernetes-configuration-files.md#the-kube-controller-manager-kubernetes-configuration-file\n"
- exit 1
- fi
-}
-
-
-check_cert_kskubeconfig()
-{
- if [ -z $KSKUBECONFIG ]
- then
- printf "${FAILED}please specify kube-scheduler kubeconfig location\n"
- exit 1
- elif [ -f $KSKUBECONFIG ]
- then
- printf "${NC}kube-scheduler kubeconfig file found, verifying the authenticity\n"
- KSKUBECONFIG_SUBJECT=$(cat $KSKUBECONFIG | grep "client-certificate-data:" | awk '{print $2}' | base64 --decode | openssl x509 -text | grep "Subject: CN" | tr -d " ")
- KSKUBECONFIG_ISSUER=$(cat $KSKUBECONFIG | grep "client-certificate-data:" | awk '{print $2}' | base64 --decode | openssl x509 -text | grep "Issuer: CN" | tr -d " ")
- KSKUBECONFIG_CERT_MD5=$(cat $KSKUBECONFIG | grep "client-certificate-data:" | awk '{print $2}' | base64 --decode | openssl x509 -noout | openssl md5 | awk '{print $2}')
- KSKUBECONFIG_KEY_MD5=$(cat $KSKUBECONFIG | grep "client-key-data" | awk '{print $2}' | base64 --decode | openssl rsa -noout | openssl md5 | awk '{print $2}')
- KSKUBECONFIG_SERVER=$(cat $KSKUBECONFIG | grep "server:"| awk '{print $2}')
- if [ $KSKUBECONFIG_SUBJECT == "Subject:CN=system:kube-scheduler" ] && [ $KSKUBECONFIG_ISSUER == "Issuer:CN=KUBERNETES-CA" ] && [ $KSKUBECONFIG_CERT_MD5 == $KSKUBECONFIG_KEY_MD5 ] && [ $KSKUBECONFIG_SERVER == "https://127.0.0.1:6443" ]
- then
- printf "${SUCCESS}kube-scheduler kubeconfig cert and key are correct\n"
- else
- printf "${FAILED}Exiting...Found mismtach in the kube-scheduler kubeconfig certificate and keys, More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/05-kubernetes-configuration-files.md#the-kube-scheduler-kubernetes-configuration-file\n"
- exit 1
- fi
- else
- printf "${FAILED}kube-scheduler kubeconfig file is missing. More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/05-kubernetes-configuration-files.md#the-kube-scheduler-kubernetes-configuration-file\n"
+ printf "${FAILED}${cert} missing. More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/04-certificate-authority.md#certificate-authority\n${NC}"
+ echo "These should be in ${CERT_LOCATION}${NC}"
exit 1
fi
}
@@ -387,17 +167,17 @@ check_cert_adminkubeconfig()
{
if [ -z $ADMINKUBECONFIG ]
then
- printf "${FAILED}please specify admin kubeconfig location\n"
+ printf "${FAILED}please specify admin kubeconfig location\n${NC}"
exit 1
elif [ -f $ADMINKUBECONFIG ]
then
printf "${NC}admin kubeconfig file found, verifying the authenticity\n"
- ADMINKUBECONFIG_SUBJECT=$(cat $ADMINKUBECONFIG | grep "client-certificate-data:" | awk '{print $2}' | base64 --decode | openssl x509 -text | grep "Subject: CN" | tr -d " ")
- ADMINKUBECONFIG_ISSUER=$(cat $ADMINKUBECONFIG | grep "client-certificate-data:" | awk '{print $2}' | base64 --decode | openssl x509 -text | grep "Issuer: CN" | tr -d " ")
- ADMINKUBECONFIG_CERT_MD5=$(cat $ADMINKUBECONFIG | grep "client-certificate-data:" | awk '{print $2}' | base64 --decode | openssl x509 -noout | openssl md5 | awk '{print $2}')
+ ADMINKUBECONFIG_SUBJECT=$(cat $ADMINKUBECONFIG | grep "client-certificate-data:" | awk '{print $2}' | base64 --decode | sudo openssl x509 -text | grep "Subject: CN" | tr -d " ")
+ ADMINKUBECONFIG_ISSUER=$(cat $ADMINKUBECONFIG | grep "client-certificate-data:" | awk '{print $2}' | base64 --decode | sudo openssl x509 -text | grep "Issuer: CN" | tr -d " ")
+ ADMINKUBECONFIG_CERT_MD5=$(cat $ADMINKUBECONFIG | grep "client-certificate-data:" | awk '{print $2}' | base64 --decode | sudo openssl x509 -noout | openssl md5 | awk '{print $2}')
ADMINKUBECONFIG_KEY_MD5=$(cat $ADMINKUBECONFIG | grep "client-key-data" | awk '{print $2}' | base64 --decode | openssl rsa -noout | openssl md5 | awk '{print $2}')
ADMINKUBECONFIG_SERVER=$(cat $ADMINKUBECONFIG | grep "server:"| awk '{print $2}')
- if [ $ADMINKUBECONFIG_SUBJECT == "Subject:CN=admin,O=system:masters" ] && [ $ADMINKUBECONFIG_ISSUER == "Issuer:CN=KUBERNETES-CA" ] && [ $ADMINKUBECONFIG_CERT_MD5 == $ADMINKUBECONFIG_KEY_MD5 ] && [ $ADMINKUBECONFIG_SERVER == "https://127.0.0.1:6443" ]
+ if [ $ADMINKUBECONFIG_SUBJECT == "Subject:CN=admin,O=system:masters" ] && [ $ADMINKUBECONFIG_ISSUER == "Issuer:CN=KUBERNETES-CA,O=Kubernetes" ] && [ $ADMINKUBECONFIG_CERT_MD5 == $ADMINKUBECONFIG_KEY_MD5 ] && [ $ADMINKUBECONFIG_SERVER == "https://127.0.0.1:6443" ]
then
printf "${SUCCESS}admin kubeconfig cert and key are correct\n"
else
@@ -410,11 +190,81 @@ check_cert_adminkubeconfig()
fi
}
+
+get_kubeconfig_cert_path()
+{
+ local kubeconfig=$1
+ local cert_field=$2
+
+    sudo cat $kubeconfig | grep "$cert_field" | awk '{print $2}'
+}
+
+check_kubeconfig()
+{
+ local name=$1
+ local location=$2
+ local apiserver=$3
+ local kubeconfig="${location}/${name}.kubeconfig"
+
+ echo "Checking $kubeconfig"
+ check_kubeconfig_exists $name $location
+ ca=$(get_kubeconfig_cert_path $kubeconfig "certificate-authority")
+ cert=$(get_kubeconfig_cert_path $kubeconfig "client-certificate")
+ key=$(get_kubeconfig_cert_path $kubeconfig "client-key")
+ server=$(sudo cat $kubeconfig | grep server | awk '{print $2}')
+
+    if [ -f "$ca" ]
+    then
+        printf "${SUCCESS}Path to CA certificate is correct${NC}\n"
+    else
+        printf "${FAILED}CA certificate not found at ${ca}${NC}\n"
+ exit 1
+ fi
+
+    if [ -f "$cert" ]
+    then
+        printf "${SUCCESS}Path to client certificate is correct${NC}\n"
+    else
+        printf "${FAILED}Client certificate not found at ${cert}${NC}\n"
+ exit 1
+ fi
+
+    if [ -f "$key" ]
+    then
+        printf "${SUCCESS}Path to client key is correct${NC}\n"
+    else
+        printf "${FAILED}Client key not found at ${key}${NC}\n"
+ exit 1
+ fi
+
+ if [ "$apiserver" = "$server" ]
+ then
+ printf "${SUCCESS}Server URL is correct${NC}\n"
+ else
+        printf "${FAILED}Server URL ${server} is incorrect${NC}\n"
+ exit 1
+ fi
+}
+
+check_kubeconfig_exists() {
+ local name=$1
+ local location=$2
+ local kubeconfig="${location}/${name}.kubeconfig"
+
+ if [ -f "${kubeconfig}" ]
+ then
+ printf "${SUCCESS}${kubeconfig} found${NC}\n"
+ else
+        printf "${FAILED}${kubeconfig} not found!${NC}\n"
+ exit 1
+ fi
+}
+
check_systemd_etcd()
{
if [ -z $ETCDCERT ] && [ -z $ETCDKEY ]
then
- printf "${FAILED}please specify ETCD cert and key location, Exiting....\n"
+ printf "${FAILED}please specify ETCD cert and key location, Exiting....\n${NC}"
exit 1
elif [ -f $SYSTEMD_ETCD_FILE ]
then
@@ -430,7 +280,7 @@ check_systemd_etcd()
PEER_TRUSTED_CA_FILE=$(systemctl cat etcd.service | grep "\--peer-trusted-ca-file"| awk '{print $1}'| cut -d "=" -f2)
# Systemd advertise , client and peer url's
- INTERNAL_IP=$(ip addr show enp0s8 | grep "inet " | awk '{print $2}' | cut -d / -f 1)
+
IAP_URL=$(systemctl cat etcd.service | grep "\--initial-advertise-peer-urls"| awk '{print $2}')
LP_URL=$(systemctl cat etcd.service | grep "\--listen-peer-urls"| awk '{print $2}')
LC_URL=$(systemctl cat etcd.service | grep "\--listen-client-urls"| awk '{print $2}')
@@ -443,23 +293,23 @@ check_systemd_etcd()
if [ $CERT_FILE == $ETCDCERT ] && [ $KEY_FILE == $ETCDKEY ] && [ $PEER_CERT_FILE == $ETCDCERT ] && [ $PEER_KEY_FILE == $ETCDKEY ] && \
[ $TRUSTED_CA_FILE == $ETCD_CA_CERT ] && [ $PEER_TRUSTED_CA_FILE = $ETCD_CA_CERT ]
then
- printf "${SUCCESS}ETCD certificate, ca and key files are correct under systemd service\n"
+ printf "${SUCCESS}ETCD certificate, ca and key files are correct under systemd service\n${NC}"
else
- printf "${FAILED}Exiting...Found mismtach in the ETCD certificate, ca and keys. More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/07-bootstrapping-etcd.md#configure-the-etcd-server\n"
+        printf "${FAILED}Exiting...Found mismatch in the ETCD certificate, ca and keys. More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/07-bootstrapping-etcd.md#configure-the-etcd-server\n${NC}"
exit 1
fi
if [ $IAP_URL == "https://$INTERNAL_IP:2380" ] && [ $LP_URL == "https://$INTERNAL_IP:2380" ] && [ $LC_URL == "https://$INTERNAL_IP:2379,https://127.0.0.1:2379" ] && \
[ $AC_URL == "https://$INTERNAL_IP:2379" ]
then
- printf "${SUCCESS}ETCD initial-advertise-peer-urls, listen-peer-urls, listen-client-urls, advertise-client-urls are correct\n"
+ printf "${SUCCESS}ETCD initial-advertise-peer-urls, listen-peer-urls, listen-client-urls, advertise-client-urls are correct\n${NC}"
else
- printf "${FAILED}Exiting...Found mismtach in the ETCD initial-advertise-peer-urls / listen-peer-urls / listen-client-urls / advertise-client-urls. More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/07-bootstrapping-etcd.md#configure-the-etcd-server\n"
+        printf "${FAILED}Exiting...Found mismatch in the ETCD initial-advertise-peer-urls / listen-peer-urls / listen-client-urls / advertise-client-urls. More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/07-bootstrapping-etcd.md#configure-the-etcd-server\n${NC}"
exit 1
fi
else
- printf "${FAILED}etcd-server.crt / etcd-server.key is missing. More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/07-bootstrapping-etcd.md#configure-the-etcd-server\n"
+ printf "${FAILED}etcd-server.crt / etcd-server.key is missing. More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/07-bootstrapping-etcd.md#configure-the-etcd-server\n${NC}"
exit 1
fi
}
@@ -468,13 +318,12 @@ check_systemd_api()
{
if [ -z $APICERT ] && [ -z $APIKEY ]
then
- printf "${FAILED}please specify kube-api cert and key location, Exiting....\n"
+ printf "${FAILED}please specify kube-api cert and key location, Exiting....\n${NC}"
exit 1
elif [ -f $SYSTEMD_API_FILE ]
then
- printf "${NC}Systemd for kube-api service found, verifying the authenticity\n"
+ printf "Systemd for kube-api service found, verifying the authenticity\n"
- INTERNAL_IP=$(ip addr show enp0s8 | grep "inet " | awk '{print $2}' | cut -d / -f 1)
ADVERTISE_ADDRESS=$(systemctl cat kube-apiserver.service | grep "\--advertise-address" | awk '{print $1}' | cut -d "=" -f2)
CLIENT_CA_FILE=$(systemctl cat kube-apiserver.service | grep "\--client-ca-file" | awk '{print $1}' | cut -d "=" -f2)
ETCD_CA_FILE=$(systemctl cat kube-apiserver.service | grep "\--etcd-cafile" | awk '{print $1}' | cut -d "=" -f2)
@@ -487,41 +336,44 @@ check_systemd_api()
TLS_CERT_FILE=$(systemctl cat kube-apiserver.service | grep "\--tls-cert-file" | awk '{print $1}' | cut -d "=" -f2)
TLS_PRIVATE_KEY_FILE=$(systemctl cat kube-apiserver.service | grep "\--tls-private-key-file" | awk '{print $1}' | cut -d "=" -f2)
- CACERT=/var/lib/kubernetes/ca.crt
- APICERT=/var/lib/kubernetes/kube-apiserver.crt
- APIKEY=/var/lib/kubernetes/kube-apiserver.key
- SACERT=/var/lib/kubernetes/service-account.crt
+ PKI=/var/lib/kubernetes/pki
+ CACERT="${PKI}/ca.crt"
+ APICERT="${PKI}/kube-apiserver.crt"
+ APIKEY="${PKI}/kube-apiserver.key"
+ SACERT="${PKI}/service-account.crt"
+ KCCERT="${PKI}/apiserver-kubelet-client.crt"
+ KCKEY="${PKI}/apiserver-kubelet-client.key"
if [ $ADVERTISE_ADDRESS == $INTERNAL_IP ] && [ $CLIENT_CA_FILE == $CACERT ] && [ $ETCD_CA_FILE == $CACERT ] && \
- [ $ETCD_CERT_FILE == "/var/lib/kubernetes/etcd-server.crt" ] && [ $ETCD_KEY_FILE == "/var/lib/kubernetes/etcd-server.key" ] && \
- [ $KUBELET_CERTIFICATE_AUTHORITY == $CACERT ] && [ $KUBELET_CLIENT_CERTIFICATE == $APICERT ] && [ $KUBELET_CLIENT_KEY == $APIKEY ] && \
+ [ $ETCD_CERT_FILE == "${PKI}/etcd-server.crt" ] && [ $ETCD_KEY_FILE == "${PKI}/etcd-server.key" ] && \
+ [ $KUBELET_CERTIFICATE_AUTHORITY == $CACERT ] && [ $KUBELET_CLIENT_CERTIFICATE == $KCCERT ] && [ $KUBELET_CLIENT_KEY == $KCKEY ] && \
[ $SERVICE_ACCOUNT_KEY_FILE == $SACERT ] && [ $TLS_CERT_FILE == $APICERT ] && [ $TLS_PRIVATE_KEY_FILE == $APIKEY ]
then
- printf "${SUCCESS}kube-apiserver advertise-address/ client-ca-file/ etcd-cafile/ etcd-certfile/ etcd-keyfile/ kubelet-certificate-authority/ kubelet-client-certificate/ kubelet-client-key/ service-account-key-file/ tls-cert-file/ tls-private-key-file are correct\n"
+ printf "${SUCCESS}kube-apiserver advertise-address/ client-ca-file/ etcd-cafile/ etcd-certfile/ etcd-keyfile/ kubelet-certificate-authority/ kubelet-client-certificate/ kubelet-client-key/ service-account-key-file/ tls-cert-file/ tls-private-key-file are correct\n${NC}"
else
- printf "${FAILED}Exiting...Found mismtach in the kube-apiserver systemd file, check advertise-address/ client-ca-file/ etcd-cafile/ etcd-certfile/ etcd-keyfile/ kubelet-certificate-authority/ kubelet-client-certificate/ kubelet-client-key/ service-account-key-file/ tls-cert-file/ tls-private-key-file. More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/08-bootstrapping-kubernetes-controllers.md#configure-the-kubernetes-api-server\n"
+            printf "${FAILED}Exiting...Found mismatch in the kube-apiserver systemd file, check advertise-address/ client-ca-file/ etcd-cafile/ etcd-certfile/ etcd-keyfile/ kubelet-certificate-authority/ kubelet-client-certificate/ kubelet-client-key/ service-account-key-file/ tls-cert-file/ tls-private-key-file. More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/08-bootstrapping-kubernetes-controllers.md#configure-the-kubernetes-api-server\n${NC}"
exit 1
fi
else
- printf "${FAILED}kube-apiserver.crt / kube-apiserver.key is missing. More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/08-bootstrapping-kubernetes-controllers.md#configure-the-kubernetes-api-server\n"
+ printf "${FAILED}kube-apiserver.crt / kube-apiserver.key is missing. More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/08-bootstrapping-kubernetes-controllers.md#configure-the-kubernetes-api-server\n${NC}"
exit 1
fi
}
check_systemd_kcm()
{
- KCMCERT=/var/lib/kubernetes/kube-controller-manager.crt
- KCMKEY=/var/lib/kubernetes/kube-controller-manager.key
- CACERT=/var/lib/kubernetes/ca.crt
- CAKEY=/var/lib/kubernetes/ca.key
- SAKEY=/var/lib/kubernetes/service-account.key
+ KCMCERT=/var/lib/kubernetes/pki/kube-controller-manager.crt
+ KCMKEY=/var/lib/kubernetes/pki/kube-controller-manager.key
+ CACERT=/var/lib/kubernetes/pki/ca.crt
+ CAKEY=/var/lib/kubernetes/pki/ca.key
+ SAKEY=/var/lib/kubernetes/pki/service-account.key
KCMKUBECONFIG=/var/lib/kubernetes/kube-controller-manager.kubeconfig
if [ -z $KCMCERT ] && [ -z $KCMKEY ]
then
- printf "${FAILED}please specify cert and key location\n"
+ printf "${FAILED}please specify cert and key location\n${NC}"
exit 1
elif [ -f $SYSTEMD_KCM_FILE ]
then
- printf "${NC}Systemd for kube-controller-manager service found, verifying the authenticity\n"
+ printf "Systemd for kube-controller-manager service found, verifying the authenticity\n"
CLUSTER_SIGNING_CERT_FILE=$(systemctl cat kube-controller-manager.service | grep "\--cluster-signing-cert-file" | awk '{print $1}' | cut -d "=" -f2)
CLUSTER_SIGNING_KEY_FILE=$(systemctl cat kube-controller-manager.service | grep "\--cluster-signing-key-file" | awk '{print $1}' | cut -d "=" -f2)
KUBECONFIG=$(systemctl cat kube-controller-manager.service | grep "\--kubeconfig" | awk '{print $1}' | cut -d "=" -f2)
@@ -531,242 +383,180 @@ check_systemd_kcm()
if [ $CLUSTER_SIGNING_CERT_FILE == $CACERT ] && [ $CLUSTER_SIGNING_KEY_FILE == $CAKEY ] && [ $KUBECONFIG == $KCMKUBECONFIG ] && \
[ $ROOT_CA_FILE == $CACERT ] && [ $SERVICE_ACCOUNT_PRIVATE_KEY_FILE == $SAKEY ]
then
- printf "${SUCCESS}kube-controller-manager cluster-signing-cert-file, cluster-signing-key-file, kubeconfig, root-ca-file, service-account-private-key-file are correct\n"
+ printf "${SUCCESS}kube-controller-manager cluster-signing-cert-file, cluster-signing-key-file, kubeconfig, root-ca-file, service-account-private-key-file are correct\n${NC}"
else
- printf "${FAILED}Exiting...Found mismtach in the kube-controller-manager cluster-signing-cert-file, cluster-signing-key-file, kubeconfig, root-ca-file, service-account-private-key-file ,More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/08-bootstrapping-kubernetes-controllers.md#configure-the-kubernetes-controller-manager\n"
+        printf "${FAILED}Exiting...Found mismatch in the kube-controller-manager cluster-signing-cert-file, cluster-signing-key-file, kubeconfig, root-ca-file, service-account-private-key-file. More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/08-bootstrapping-kubernetes-controllers.md#configure-the-kubernetes-controller-manager\n${NC}"
exit 1
fi
else
- printf "${FAILED}kube-controller-manager.crt / kube-controller-manager.key is missing. More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/08-bootstrapping-kubernetes-controllers.md#configure-the-kubernetes-controller-manager\n"
+ printf "${FAILED}kube-controller-manager.crt / kube-controller-manager.key is missing. More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/08-bootstrapping-kubernetes-controllers.md#configure-the-kubernetes-controller-manager\n${NC}"
exit 1
fi
}
check_systemd_ks()
{
- KSCERT=/var/lib/kubernetes/kube-scheduler.crt
- KSKEY=/var/lib/kubernetes/kube-scheduler.key
+ KSCERT=/var/lib/kubernetes/pki/kube-scheduler.crt
+ KSKEY=/var/lib/kubernetes/pki/kube-scheduler.key
KSKUBECONFIG=/var/lib/kubernetes/kube-scheduler.kubeconfig
if [ -z $KSCERT ] && [ -z $KSKEY ]
then
- printf "${FAILED}please specify cert and key location\n"
+ printf "${FAILED}please specify cert and key location\n${NC}"
exit 1
elif [ -f $SYSTEMD_KS_FILE ]
then
- printf "${NC}Systemd for kube-scheduler service found, verifying the authenticity\n"
+ printf "Systemd for kube-scheduler service found, verifying the authenticity\n"
KUBECONFIG=$(systemctl cat kube-scheduler.service | grep "\--kubeconfig"| awk '{print $1}'| cut -d "=" -f2)
- ADDRESS=$(systemctl cat kube-scheduler.service | grep "\--address"| awk '{print $1}'| cut -d "=" -f2)
- if [ $KUBECONFIG == $KSKUBECONFIG ] && [ $ADDRESS == "127.0.0.1" ]
+ if [ $KUBECONFIG == $KSKUBECONFIG ]
then
- printf "${SUCCESS}kube-scheduler --kubeconfig, --address are correct\n"
+ printf "${SUCCESS}kube-scheduler --kubeconfig is correct\n${NC}"
else
- printf "${FAILED}Exiting...Found mismtach in the kube-scheduler --kubeconfig, --address, More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/08-bootstrapping-kubernetes-controllers.md#configure-the-kubernetes-scheduler\n"
+        printf "${FAILED}Exiting... Found mismatch in the kube-scheduler --kubeconfig. More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/08-bootstrapping-kubernetes-controllers.md#configure-the-kubernetes-scheduler\n${NC}"
exit 1
fi
else
- printf "${FAILED}kube-scheduler.crt / kube-scheduler.key is missing. More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/08-bootstrapping-kubernetes-controllers.md#configure-the-kubernetes-scheduler\n"
+ printf "${FAILED}kube-scheduler.crt / kube-scheduler.key is missing. More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/08-bootstrapping-kubernetes-controllers.md#configure-the-kubernetes-scheduler\n${NC}"
exit 1
fi
}
# END OF Function - Master node #
-# Function - Worker-1 node #
-check_cert_worker_1()
-{
- if [ -z $WORKER_1_CERT ] && [ -z $WORKER_1_KEY ]
- then
- printf "${FAILED}please specify cert and key location of worker-1 node\n"
- exit 1
- elif [ -f $WORKER_1_CERT ] && [ -f $WORKER_1_KEY ]
- then
- printf "${NC}worker-1 cert and key found, verifying the authenticity\n"
- WORKER_1_CERT_SUBJECT=$(openssl x509 -in $WORKER_1_CERT -text | grep "Subject: CN"| tr -d " ")
- WORKER_1_CERT_ISSUER=$(openssl x509 -in $WORKER_1_CERT -text | grep "Issuer: CN"| tr -d " ")
- WORKER_1_CERT_MD5=$(openssl x509 -noout -modulus -in $WORKER_1_CERT | openssl md5| awk '{print $2}')
- WORKER_1_KEY_MD5=$(openssl rsa -noout -modulus -in $WORKER_1_KEY | openssl md5| awk '{print $2}')
- if [ $WORKER_1_CERT_SUBJECT == "Subject:CN=system:node:worker-1,O=system:nodes" ] && [ $WORKER_1_CERT_ISSUER == "Issuer:CN=KUBERNETES-CA" ] && [ $WORKER_1_CERT_MD5 == $WORKER_1_KEY_MD5 ]
- then
- printf "${SUCCESS}worker-1 cert and key are correct\n"
- else
- printf "${FAILED}Exiting...Found mismtach in the worker-1 certificate and keys, More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/09-bootstrapping-kubernetes-workers.md#provisioning--kubelet-client-certificates\n"
- exit 1
- fi
- else
- printf "${FAILED}/var/lib/kubelet/worker-1.crt / /var/lib/kubelet/worker-1.key is missing. More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/09-bootstrapping-kubernetes-workers.md#provisioning--kubelet-client-certificates\n"
- exit 1
- fi
-}
-
-check_cert_worker_1_kubeconfig()
-{
- if [ -z $WORKER_1_KUBECONFIG ]
- then
- printf "${FAILED}please specify worker-1 kubeconfig location\n"
- exit 1
- elif [ -f $WORKER_1_KUBECONFIG ]
- then
- printf "${NC}worker-1 kubeconfig file found, verifying the authenticity\n"
- WORKER_1_KUBECONFIG_SUBJECT=$(cat $WORKER_1_KUBECONFIG | grep "client-certificate-data:" | awk '{print $2}' | base64 --decode | openssl x509 -text | grep "Subject: CN" | tr -d " ")
- WORKER_1_KUBECONFIG_ISSUER=$(cat $WORKER_1_KUBECONFIG | grep "client-certificate-data:" | awk '{print $2}' | base64 --decode | openssl x509 -text | grep "Issuer: CN" | tr -d " ")
- WORKER_1_KUBECONFIG_CERT_MD5=$(cat $WORKER_1_KUBECONFIG | grep "client-certificate-data:" | awk '{print $2}' | base64 --decode | openssl x509 -noout | openssl md5 | awk '{print $2}')
- WORKER_1_KUBECONFIG_KEY_MD5=$(cat $WORKER_1_KUBECONFIG | grep "client-key-data" | awk '{print $2}' | base64 --decode | openssl rsa -noout | openssl md5 | awk '{print $2}')
- WORKER_1_KUBECONFIG_SERVER=$(cat $WORKER_1_KUBECONFIG | grep "server:"| awk '{print $2}')
- if [ $WORKER_1_KUBECONFIG_SUBJECT == "Subject:CN=system:node:worker-1,O=system:nodes" ] && [ $WORKER_1_KUBECONFIG_ISSUER == "Issuer:CN=KUBERNETES-CA" ] && \
- [ $WORKER_1_KUBECONFIG_CERT_MD5 == $WORKER_1_KUBECONFIG_KEY_MD5 ] && [ $WORKER_1_KUBECONFIG_SERVER == "https://192.168.5.30:6443" ]
- then
- printf "${SUCCESS}worker-1 kubeconfig cert and key are correct\n"
- else
- printf "${FAILED}Exiting...Found mismtach in the worker-1 kubeconfig certificate and keys, More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/09-bootstrapping-kubernetes-workers.md#the-kubelet-kubernetes-configuration-file\n"
- exit 1
- fi
- else
- printf "${FAILED}worker-1 /var/lib/kubelet/kubeconfig file is missing. More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/09-bootstrapping-kubernetes-workers.md#the-kubelet-kubernetes-configuration-file\n"
- exit 1
- fi
-}
-
-check_cert_worker_1_kubelet()
-{
-
- CACERT=/var/lib/kubernetes/ca.crt
- WORKER_1_TLSCERTFILE=/var/lib/kubelet/${HOSTNAME}.crt
- WORKER_1_TLSPRIVATEKEY=/var/lib/kubelet/${HOSTNAME}.key
-
- if [ -z $WORKER_1_KUBELET ] && [ -z $SYSTEMD_WORKER_1_KUBELET ]
- then
- printf "${FAILED}please specify worker-1 kubelet config location\n"
- exit 1
- elif [ -f $WORKER_1_KUBELET ] && [ -f $SYSTEMD_WORKER_1_KUBELET ] && [ -f $WORKER_1_TLSCERTFILE ] && [ -f $WORKER_1_TLSPRIVATEKEY ]
- then
- printf "${NC}worker-1 kubelet config file, systemd services, tls cert and key found, verifying the authenticity\n"
-
- WORKER_1_KUBELET_CA=$(cat $WORKER_1_KUBELET | grep "clientCAFile:" | awk '{print $2}' | tr -d " \"")
- WORKER_1_KUBELET_DNS=$(cat $WORKER_1_KUBELET | grep "resolvConf:" | awk '{print $2}' | tr -d " \"")
- WORKER_1_KUBELET_AUTH_MODE=$(cat $WORKER_1_KUBELET | grep "mode:" | awk '{print $2}' | tr -d " \"")
-
- if [ $WORKER_1_KUBELET_CA == $CACERT ] && [ $WORKER_1_KUBELET_DNS == "/run/systemd/resolve/resolv.conf" ] && \
- [ $WORKER_1_KUBELET_AUTH_MODE == "Webhook" ]
- then
- printf "${SUCCESS}worker-1 kubelet config CA cert, resolvConf and Auth mode are correct\n"
- else
- printf "${FAILED}Exiting...Found mismtach in the worker-1 kubelet config CA cert, resolvConf and Auth mode, More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/09-bootstrapping-kubernetes-workers.md#configure-the-kubelet\n"
- exit 1
- fi
-
- KUBELETCONFIG=$(systemctl cat kubelet.service | grep "\--config" | awk '{print $1}'| cut -d "=" -f2)
- TLSCERTFILE=$(systemctl cat kubelet.service | grep "\--tls-cert-file" | awk '{print $1}'| cut -d "=" -f2)
- TLSPRIVATEKEY=$(systemctl cat kubelet.service | grep "\--tls-private-key-file" | awk '{print $1}'| cut -d "=" -f2)
-
- if [ $KUBELETCONFIG == $WORKER_1_KUBELET ] && [ $TLSCERTFILE == $WORKER_1_TLSCERTFILE ] && \
- [ $TLSPRIVATEKEY == $WORKER_1_TLSPRIVATEKEY ]
- then
- printf "${SUCCESS}worker-1 kubelet systemd services are correct\n"
- else
- printf "${FAILED}Exiting...Found mismtach in the worker-1 kubelet systemd services, More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/09-bootstrapping-kubernetes-workers.md#configure-the-kubelet\n"
- exit 1
- fi
-
- else
- printf "${FAILED}worker-1 kubelet config, systemd services, tls cert and key file is missing. More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/09-bootstrapping-kubernetes-workers.md\n"
- exit 1
- fi
-}
-
-check_cert_worker_1_kp()
-{
-
- WORKER_1_KP_CONFIG_YAML=/var/lib/kube-proxy/kube-proxy-config.yaml
-
- if [ -z $WORKER_1_KP_KUBECONFIG ] && [ -z $SYSTEMD_WORKER_1_KP ]
- then
- printf "${FAILED}please specify worker-1 kube-proxy config and systemd service path\n"
- exit 1
- elif [ -f $WORKER_1_KP_KUBECONFIG ] && [ -f $SYSTEMD_WORKER_1_KP ] && [ -f $WORKER_1_KP_CONFIG_YAML ]
- then
- printf "${NC}worker-1 kube-proxy kubeconfig, systemd services and configuration files found, verifying the authenticity\n"
-
- KP_CONFIG=$(cat $WORKER_1_KP_CONFIG_YAML | grep "kubeconfig:" | awk '{print $2}' | tr -d " \"")
- KP_CONFIG_YAML=$(systemctl cat kube-proxy.service | grep "\--config" | awk '{print $1}'| cut -d "=" -f2)
-
- if [ $KP_CONFIG == $WORKER_1_KP_KUBECONFIG ] && [ $KP_CONFIG_YAML == $WORKER_1_KP_CONFIG_YAML ]
- then
- printf "${SUCCESS}worker-1 kube-proxy kubeconfig and configuration files are correct\n"
- else
- printf "${FAILED}Exiting...Found mismtach in the worker-1 kube-proxy kubeconfig and configuration files, More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/09-bootstrapping-kubernetes-workers.md#configure-the-kubernetes-proxy\n"
- exit 1
- fi
-
- else
- printf "${FAILED}worker-1 kube-proxy kubeconfig and configuration files are missing. More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/09-bootstrapping-kubernetes-workers.md#configure-the-kubernetes-proxy\n"
- exit 1
- fi
-}
-
-# END OF Function - Worker-1 node #
-
-echo -e "This script will validate the certificates in master as well as worker-1 nodes. Before proceeding, make sure you ssh into the respective node [ Master or Worker-1 ] for certificate validation\n"
-echo -e "1. Verify certification in Master Node\n"
-echo -e "2. Verify certification in Worker-1 Node\n"
-echo -e "Please select either the option 1 or 2\n"
+echo "This script will validate the certificates and kubeconfigs on the master and worker nodes. Before proceeding, make sure you are logged in to the node that the selected option verifies."
+echo
+echo " 1. Verify certificates on Master Nodes after step 4"
+echo " 2. Verify kubeconfigs on Master Nodes after step 5"
+echo " 3. Verify kubeconfigs and PKI on Master Nodes after step 8"
+echo " 4. Verify kubeconfigs and PKI on worker-1 Node after step 10"
+echo " 5. Verify kubeconfigs and PKI on worker-2 Node after step 11"
+echo
+echo -n "Please select one of the above options: "
read value
+HOST=$(hostname -s)
+
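+# Expected issuer/subject strings, in the form produced by 'openssl x509 -text' with whitespace removed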
+CERT_ISSUER="Issuer:CN=KUBERNETES-CA,O=Kubernetes"
+SUBJ_CA="Subject:CN=KUBERNETES-CA,O=Kubernetes"
+SUBJ_ADMIN="Subject:CN=admin,O=system:masters"
+SUBJ_KCM="Subject:CN=system:kube-controller-manager,O=system:kube-controller-manager"
+SUBJ_KP="Subject:CN=system:kube-proxy,O=system:node-proxier"
+SUBJ_KS="Subject:CN=system:kube-scheduler,O=system:kube-scheduler"
+SUBJ_API="Subject:CN=kube-apiserver,O=Kubernetes"
+SUBJ_SA="Subject:CN=service-accounts,O=Kubernetes"
+SUBJ_ETCD="Subject:CN=etcd-server,O=Kubernetes"
+SUBJ_APIKC="Subject:CN=kube-apiserver-kubelet-client,O=system:masters"
+
case $value in
1)
+ if ! [ "${HOST}" = "master-1" -o "${HOST}" = "master-2" ]
+ then
+ printf "${FAILED}Must run on master-1 or master-2${NC}\n"
+ exit 1
+ fi
+
echo -e "The selected option is $value, proceeding the certificate verification of Master node"
- ### MASTER NODES ###
- master_hostname=$(hostname -s)
- # CRT & KEY verification
- check_cert_ca
+ CERT_LOCATION=$HOME
+ check_cert_and_key "ca" $SUBJ_CA $CERT_ISSUER
+ check_cert_and_key "kube-apiserver" $SUBJ_API $CERT_ISSUER
+ check_cert_and_key "kube-controller-manager" $SUBJ_KCM $CERT_ISSUER
+ check_cert_and_key "kube-scheduler" $SUBJ_KS $CERT_ISSUER
+ check_cert_and_key "service-account" $SUBJ_SA $CERT_ISSUER
+ check_cert_and_key "apiserver-kubelet-client" $SUBJ_APIKC $CERT_ISSUER
+ check_cert_and_key "etcd-server" $SUBJ_ETCD $CERT_ISSUER
- if [ $master_hostname == "master-1" ]
- then
- check_cert_admin
- check_cert_kcm
- check_cert_kp
- check_cert_ks
- check_cert_adminkubeconfig
- check_cert_kpkubeconfig
+ if [ "${HOST}" = "master-1" ]
+ then
+ check_cert_and_key "admin" $SUBJ_ADMIN $CERT_ISSUER
+ check_cert_and_key "kube-proxy" $SUBJ_KP $CERT_ISSUER
fi
- check_cert_api
- check_cert_sa
- check_cert_etcd
-
- # Kubeconfig verification
- check_cert_kcmkubeconfig
- check_cert_kskubeconfig
-
- # Systemd verification
- check_systemd_etcd
- check_systemd_api
- check_systemd_kcm
- check_systemd_ks
-
- ### END OF MASTER NODES ###
-
;;
2)
- echo -e "The selected option is $value, proceeding the certificate verification of Worker-1 node"
+ if ! [ "${HOST}" = "master-1" -o "${HOST}" = "master-2" ]
+ then
+ printf "${FAILED}Must run on master-1 or master-2${NC}\n"
+ exit 1
+ fi
- ### WORKER-1 NODE ###
+ check_cert_adminkubeconfig
+ check_kubeconfig_exists "kube-controller-manager" $HOME
+ check_kubeconfig_exists "kube-scheduler" $HOME
- check_cert_worker_1
- check_cert_worker_1_kubeconfig
- check_cert_worker_1_kubelet
- check_cert_worker_1_kp
-
- ### END OF WORKER-1 NODE ###
+ if [ "${HOST}" = "master-1" ]
+ then
+ check_kubeconfig_exists "kube-proxy" $HOME
+ fi
;;
+ 3)
+ if ! [ "${HOST}" = "master-1" -o "${HOST}" = "master-2" ]
+ then
+ printf "${FAILED}Must run on master-1 or master-2${NC}\n"
+ exit 1
+ fi
+
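+    # Certificates installed for the etcd service under /etc/etcd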
+ CERT_LOCATION=/etc/etcd
+ check_cert_only "ca" $SUBJ_CA $CERT_ISSUER
+ check_cert_and_key "etcd-server" $SUBJ_ETCD $CERT_ISSUER
+
+ CERT_LOCATION=/var/lib/kubernetes/pki
+ check_cert_and_key "ca" $SUBJ_CA $CERT_ISSUER
+ check_cert_and_key "kube-apiserver" $SUBJ_API $CERT_ISSUER
+ check_cert_and_key "kube-controller-manager" $SUBJ_KCM $CERT_ISSUER
+ check_cert_and_key "kube-scheduler" $SUBJ_KS $CERT_ISSUER
+ check_cert_and_key "service-account" $SUBJ_SA $CERT_ISSUER
+ check_cert_and_key "apiserver-kubelet-client" $SUBJ_APIKC $CERT_ISSUER
+ check_cert_and_key "etcd-server" $SUBJ_ETCD $CERT_ISSUER
+
+ check_kubeconfig "kube-controller-manager" "/var/lib/kubernetes" "https://127.0.0.1:6443"
+ check_kubeconfig "kube-scheduler" "/var/lib/kubernetes" "https://127.0.0.1:6443"
+
+ check_systemd_api
+ check_systemd_etcd
+ check_systemd_kcm
+ check_systemd_ks
+ ;;
+
+ 4)
+ if ! [ "${HOST}" = "worker-1" ]
+ then
+ printf "${FAILED}Must run on worker-1${NC}\n"
+ exit 1
+ fi
+
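+    # worker-1 certificates live under /var/lib/kubernetes/pki; its kubeconfigs point at the load balancer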
+ CERT_LOCATION=/var/lib/kubernetes/pki
+ check_cert_only "ca" $SUBJ_CA $CERT_ISSUER
+ check_cert_and_key "kube-proxy" $SUBJ_KP $CERT_ISSUER
+ check_cert_and_key "worker-1" "Subject:CN=system:node:worker-1,O=system:nodes" $CERT_ISSUER
+ check_kubeconfig "kube-proxy" "/var/lib/kube-proxy" "https://${LOADBALANCER}:6443"
+ check_kubeconfig "kubelet" "/var/lib/kubelet" "https://${LOADBALANCER}:6443"
+ ;;
+
+ 5)
+ if ! [ "${HOST}" = "worker-2" ]
+ then
+ printf "${FAILED}Must run on worker-2${NC}\n"
+ exit 1
+ fi
+
+ CERT_LOCATION=/var/lib/kubernetes/pki
+ check_cert_only "ca" $SUBJ_CA $CERT_ISSUER
+ check_cert_and_key "kube-proxy" $SUBJ_KP $CERT_ISSUER
+
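+    # The TLS-bootstrapped kubelet keeps its rotated client certificate under /var/lib/kubelet/pki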
+ CERT_LOCATION=/var/lib/kubelet/pki
+ check_cert_only "kubelet-client-current" "Subject:O=system:nodes,CN=system:node:worker-2" $CERT_ISSUER
+ check_kubeconfig "kube-proxy" "/var/lib/kube-proxy" "https://${LOADBALANCER}:6443"
+ ;;
+
+
*)
- printf "${FAILED}Exiting.... Please select the valid option either 1 or 2\n"
+        printf "${FAILED}Exiting... Please select a valid option between 1 and 5\n${NC}"
exit 1
;;
esac
diff --git a/vagrant/ubuntu/install-docker-2.sh b/vagrant/ubuntu/install-docker-2.sh
deleted file mode 100644
index 40a2085..0000000
--- a/vagrant/ubuntu/install-docker-2.sh
+++ /dev/null
@@ -1,3 +0,0 @@
-cd /tmp
-curl -fsSL https://get.docker.com -o get-docker.sh
-sh /tmp/get-docker.sh
diff --git a/vagrant/ubuntu/install-docker.sh b/vagrant/ubuntu/install-docker.sh
deleted file mode 100644
index e742524..0000000
--- a/vagrant/ubuntu/install-docker.sh
+++ /dev/null
@@ -1,15 +0,0 @@
-#!/bin/bash
-export DEBIAN_FRONTEND=noninteractive
-apt-get update \
- && apt-get install -y \
- apt-transport-https \
- ca-certificates \
- curl \
- software-properties-common \
- && curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - \
- && add-apt-repository \
- "deb https://download.docker.com/linux/$(. /etc/os-release; echo "$ID") \
- $(lsb_release -cs) \
- stable" \
- && apt-get update \
- && apt-get install -y docker-ce=$(apt-cache madison docker-ce | grep 18.06 | head -1 | awk '{print $3}')
diff --git a/vagrant/ubuntu/setup-kernel.sh b/vagrant/ubuntu/setup-kernel.sh
new file mode 100644
index 0000000..2a76c3f
--- /dev/null
+++ b/vagrant/ubuntu/setup-kernel.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+#
+# Sets up the kernel with the requirements for running Kubernetes
+# Requires a reboot, which is carried out by the vagrant provisioner.
+set -ex
+
+# Disable cgroups v2 and IPv6 (kernel command line parameters)
+sed -i 's/GRUB_CMDLINE_LINUX_DEFAULT="/GRUB_CMDLINE_LINUX_DEFAULT="systemd.unified_cgroup_hierarchy=0 ipv6.disable=1 /' /etc/default/grub
+update-grub
+
+# Add br_netfilter kernel module
+echo "br_netfilter" >> /etc/modules
+
+# Set network tunables
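+#   bridge-nf-call-iptables=1 - pass bridged (pod) traffic through iptables (needs br_netfilter above)
+#   ip_forward=1              - allow the node to forward packets between interfaces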
+cat <<EOF >> /etc/sysctl.d/10-kubernetes.conf
+net.bridge.bridge-nf-call-iptables=1
+net.ipv4.ip_forward=1
+EOF
+
diff --git a/vagrant/ubuntu/tmux.conf b/vagrant/ubuntu/tmux.conf
new file mode 100644
index 0000000..840b827
--- /dev/null
+++ b/vagrant/ubuntu/tmux.conf
@@ -0,0 +1,3 @@
+set -g default-shell /bin/bash
+set -g mouse on
+bind -n C-x setw synchronize-panes
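+# C-x toggles synchronize-panes (broadcast keystrokes to every pane)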
diff --git a/vagrant/ubuntu/update-dns.sh b/vagrant/ubuntu/update-dns.sh
index b288622..7d0f370 100644
--- a/vagrant/ubuntu/update-dns.sh
+++ b/vagrant/ubuntu/update-dns.sh
@@ -1,5 +1,6 @@
#!/bin/bash
+# Point to Google's DNS server
sed -i -e 's/#DNS=/DNS=8.8.8.8/' /etc/systemd/resolved.conf
service systemd-resolved restart
\ No newline at end of file
diff --git a/vagrant/ubuntu/vagrant/setup-hosts.sh b/vagrant/ubuntu/vagrant/setup-hosts.sh
index c20c69d..eef212a 100644
--- a/vagrant/ubuntu/vagrant/setup-hosts.sh
+++ b/vagrant/ubuntu/vagrant/setup-hosts.sh
@@ -1,17 +1,22 @@
#!/bin/bash
-set -e
+#
+# Set up /etc/hosts so we can resolve all the machines in the VirtualBox network
+set -ex
IFNAME=$1
+THISHOST=$2
ADDRESS="$(ip -4 addr show $IFNAME | grep "inet" | head -1 |awk '{print $2}' | cut -d/ -f1)"
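+# First three octets of this host's address, used to build the addresses of the other hosts below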
+NETWORK=$(echo $ADDRESS | awk 'BEGIN {FS="."} ; { printf("%s.%s.%s", $1, $2, $3) }')
sed -e "s/^.*${HOSTNAME}.*/${ADDRESS} ${HOSTNAME} ${HOSTNAME}.local/" -i /etc/hosts
-# remove ubuntu-bionic entry
-sed -e '/^.*ubuntu-bionic.*/d' -i /etc/hosts
+# remove ubuntu-jammy entry
+sed -e '/^.*ubuntu-jammy.*/d' -i /etc/hosts
+sed -e "/^.*$2.*/d" -i /etc/hosts
# Update /etc/hosts about other hosts
cat >> /etc/hosts <<EOF