Upgrade/1.24 (#291)

* Set up Vagrantfile - Use Ubuntu 22.04 - Set required kernel parameters and tunables - Optimise file for DRY by use of local functions - No longer install Docker
* Update prerequisites
* Update compute resources
* Update client-tools
* Update cert authority
* Update kube config files
* Update data encryption keys
* Update etcd
* Cert enhancements - Use dig for host IPs - Create front-proxy keys
* Update prereqs with lab defaults
* Minor update
* Dynamic kubelet reconfig removed in 1.24
* Update failed provisioning
* Update cert subjects. Use vars for IP addresses
* Use vars for IP addresses
* Use vars for IPs. Update unit file
* Unit updates for 1.24. Use vars for IPs
* 1.24 changes - Update unit files - Use vars for IPs - Install containerd
* Use vars for IPs. Update outputs
* Remove CNI plugins - done earlier
* Update API versions
* Adjust VM RAM
* Update coredns version and api versions
* Update git ignore and attributes
* Note about deprecation warning
* Fix kubeconfig name
* Formatting changes + pin nginx version
* Update kubetest
* Update README
* Discuss why only 2 masters
* Note on changing service cidr range vs coredns
* Add RAM column to VM table
* Best practice - secure PKI
* Secure kubeconfig
* Add prev link
* Adding `Prev` links
* Squashed commit of the following (author: Alistair Mackay <34012094+fireflycons@users.noreply.github.com>):
  - 8fbd36069cbf7365f627e5ebf5a04e37cde085d9 (Thu Aug 25 20:06:10 2022 +0100) Update dns-addon test
  - 5528e873ecbe3265155da48d24c24d696635af52 (Thu Aug 25 20:00:48 2022 +0100) Fix get nodes
  - 0d88ab0d1c4b6a7ae05bc2552366460f741bb763 (Thu Aug 25 20:00:19 2022 +0100) Fix env var name
  - e564db03ff9c4c9ef536bcc5cd999fa1e6a3de15 (Thu Aug 25 19:42:52 2022 +0100) Update e2e-tests
  - 247a59f2c5b84e34972f396cf87a34bcbeb2d2ef (Thu Aug 25 19:39:54 2022 +0100) Updated e2e-tests
  - 60b33d025bb252570f41c13f90955ec8d59141a7 (Thu Aug 25 19:38:02 2022 +0100) bashify commands in ```
  - 2814949d6dd569c59ea7ec61135784d51ad4de1f (Thu Aug 25 19:35:32 2022 +0100) Note deprecation warning when deploying weave
  - af0264e13e5f0e277f8f31e5115a813680aadd74 (Thu Aug 25 19:33:55 2022 +0100) Nodes are ready at end of step 11
  - 050502386d36a8593ed7348e902cdff9ad9c64b2 (Thu Aug 25 19:30:00 2022 +0100) Minor change CNI
  - 04bdc1483e9696ed018ac26b6480237ee1dcf1d1 (Thu Aug 25 19:21:22 2022 +0100) Explain data at rest is in etcd
  - 243154b9866f5a7a1a49037f97e38c6bf7ffbcb7 (Thu Aug 25 19:18:49 2022 +0100) Explanation of api cluster ip
  - dd168ac2e128cbd405248115d8724498fa18fa67 (Thu Aug 25 19:14:42 2022 +0100) Include vagrant password
  - d51c65a77ac192e2468d92f0067958c69057a2e0 (Thu Aug 25 19:12:34 2022 +0100) Update tmux message
  - 10f41737100ab410adb6b20712ee32cd80618e3d (Thu Aug 25 19:09:23 2022 +0100) Insert step to configure CNI on both workers. Optionally with tmux
  - 8fd873f1492f6ea1c846b3309f57740e8501adee (Thu Aug 25 18:42:27 2022 +0100) Shuffle up to make room for common cni install
  - d650443b069a7543cbb4cf449818a81d84932007 (Thu Aug 25 07:34:59 2022 +0100) Added warning output to componentstatuses
  - 7bfef8f16bd1a126dcf3e5f43a02d79517d64c74 (Thu Aug 25 07:34:38 2022 +0100) Rearrange text
  - b16b92bc6513cf355a41afa22ddfe2696142c28b (Thu Aug 25 07:34:18 2022 +0100) Minor wording change. DNS address is conventionally .10
  - 96c9d25663ce3d721e670262bb6858e9a7183873 (Thu Aug 25 07:32:24 2022 +0100) Use shell vars for etcd addresses
  - c9e223fba5324a1c65d6f583cf9e739b8459df5d (Thu Aug 25 07:31:58 2022 +0100) Update on network defaults
  - 1cf98649df9410b8a7d14c68bcb17c24aa6a210a (Thu Aug 25 07:05:38 2022 +0100) Get and install correct CNI components
  - 311905fba72f4a48cde4a73c589daea9b76042b7 (Thu Aug 25 06:18:55 2022 +0100) Update Approve CSR
  - 4c39c84c172fde8ab2aafc4ea38b050eb7f3019b (Wed Aug 24 20:34:53 2022 +0100) Moving certs out of service kubeconfigs
* Squashed commit of the following (author: Alistair Mackay <34012094+fireflycons@users.noreply.github.com>):
  - 252cc335739e3c8007ab86c951222aba954d80f7 (Sun Aug 28 20:29:23 2022 +0100) Update external links
  - 8091d1a13bc5a29654db2b8fecd55b8180bf8cab (Sun Aug 28 20:28:14 2022 +0100) Mac M1 note
  - 8b7e6065ffb74532b6ad7570a8c978addcc7fb66 (Sun Aug 28 20:03:11 2022 +0100) Tweak order of commands e2e tests
  - 857d039dd1dff28e92d392ad6c5e40814a9eb054 (Sun Aug 28 20:02:51 2022 +0100) Fixing kubeconfig checks
  - 26f42049bebd2d539406e6e16c51bb06441702f1 (Sun Aug 28 15:51:13 2022 +0100) Updated cert_verify
  - 0df54e4c3499e6d79b836e1dfcf74eb9fdf196b1 (Sun Aug 28 09:09:14 2022 +0100) Rewrite cert_verify. Round 1: certs and kubeconfigs
* Update README - Insert CNI lab - Correct CNI versions
* Automate hostfile network settings. Determine from interface address passed in.
* Update 01-prerequisites.md
* Update 01-prerequisites.md - Correct the default vm ip range
* Review updates. Issue 1
* Review updates. Issue 2
* Review updates. Issue 3 - In actual fact, the base script is cert_verfiy.sh, so the error is in the link created by the provisioner. You'll see that the later labs all refer to it with underscore.
* Review updates. Issue 5
* Review updates. Issue 6
* Review updates. Issue 7 - I whip through the scripts so fast that even if I had copied it twice to my quick script, I didn't notice it say that the resource exists and is unchanged!
* These certs already copied in step 4
* Formatting and command grouping
* Review updates. Step 11 cert_verify - Needs to be done after kubelet starts, as it is looking for the auto-issued cert
* Group command batches
* Remove duplicate clusterrolebinding
* Extraction of scripts from md using tool - This uses markdown comments and ```bash fences to determine what to extract and for which hosts. Fixed shell var bug in step 11
* Fixed typos
* Be specific that we're doing shutdown, not suspend
* Minor edits for clarity
* remove the extra \
* Rename step 9 to CRI, as that's what it actually is
* Disambiguate CRI vs CNI
* small fixes

Co-authored-by: Tej Singh Rana <58101587+Tej-Singh-Rana@users.noreply.github.com>

parent 6327752d82
commit dcddd3347f

.gitattributes:

@@ -0,0 +1,3 @@
+*.sh text eol=lf
+*.conf text eol=lf
+vimrc text eol=lf

.gitignore:

@@ -1,53 +1,13 @@
-admin-csr.json
-admin-key.pem
-admin.csr
-admin.pem
-admin.kubeconfig
-ca-config.json
-ca-csr.json
-ca-key.pem
-ca.csr
-ca.pem
-encryption-config.yaml
-kube-controller-manager-csr.json
-kube-controller-manager-key.pem
-kube-controller-manager.csr
-kube-controller-manager.kubeconfig
-kube-controller-manager.pem
-kube-scheduler-csr.json
-kube-scheduler-key.pem
-kube-scheduler.csr
-kube-scheduler.kubeconfig
-kube-scheduler.pem
-kube-proxy-csr.json
-kube-proxy-key.pem
-kube-proxy.csr
-kube-proxy.kubeconfig
-kube-proxy.pem
-kubernetes-csr.json
-kubernetes-key.pem
-kubernetes.csr
-kubernetes.pem
-worker-0-csr.json
-worker-0-key.pem
-worker-0.csr
-worker-0.kubeconfig
-worker-0.pem
-worker-1-csr.json
-worker-1-key.pem
-worker-1.csr
-worker-1.kubeconfig
-worker-1.pem
-worker-2-csr.json
-worker-2-key.pem
-worker-2.csr
-worker-2.kubeconfig
-worker-2.pem
-service-account-key.pem
-service-account.csr
-service-account.pem
-service-account-csr.json
 .idea
-ubuntu-bionic*.log
+.vscode
+ubuntu-jammy*.log
 .vagrant
 temp
+*.crt
+*.key
+*.pem
+*.csr
+*csr.json
+*.kubeconfig
+quick-steps
+venv

README.md:

@@ -11,8 +11,6 @@ Kubernetes The Hard Way is optimized for learning, which means taking the long r
 This tutorial is a modified version of the original developed by [Kelsey Hightower](https://github.com/kelseyhightower/kubernetes-the-hard-way).
 While the original one uses GCP as the platform to deploy kubernetes, we use VirtualBox and Vagrant to deploy a cluster on a local machine. If you prefer the cloud version, refer to the original one [here](https://github.com/kelseyhightower/kubernetes-the-hard-way)
-
-Another difference is that we use Docker instead of containerd. There are a few other differences to the original and they are documented [here](docs/differences-to-original.md)
 
 > The results of this tutorial should not be viewed as production ready, and may receive limited support from the community, but don't let that stop you from learning!
 
 ## Target Audience
@@ -23,12 +21,12 @@ The target audience for this tutorial is someone planning to support a productio
 
 Kubernetes The Hard Way guides you through bootstrapping a highly available Kubernetes cluster with end-to-end encryption between components and RBAC authentication.
 
-* [Kubernetes](https://github.com/kubernetes/kubernetes) 1.13.0
-* [Docker Container Runtime](https://github.com/containerd/containerd) 18.06
-* [CNI Container Networking](https://github.com/containernetworking/cni) 0.7.5
+* [Kubernetes](https://github.com/kubernetes/kubernetes) 1.24.3
+* [Container Runtime](https://github.com/containerd/containerd) 1.5.9
+* [CNI Container Networking](https://github.com/containernetworking/cni) 0.8.6
 * [Weave Networking](https://www.weave.works/docs/net/latest/kubernetes/kube-addon/)
-* [etcd](https://github.com/coreos/etcd) v3.3.9
-* [CoreDNS](https://github.com/coredns/coredns) v1.2.2
+* [etcd](https://github.com/coreos/etcd) v3.5.3
+* [CoreDNS](https://github.com/coredns/coredns) v1.8.6
 
 ## Labs
 
@@ -40,13 +38,13 @@ Kubernetes The Hard Way guides you through bootstrapping a highly available Kube
 * [Generating the Data Encryption Config and Key](docs/06-data-encryption-keys.md)
 * [Bootstrapping the etcd Cluster](docs/07-bootstrapping-etcd.md)
 * [Bootstrapping the Kubernetes Control Plane](docs/08-bootstrapping-kubernetes-controllers.md)
-* [Bootstrapping the Kubernetes Worker Nodes](docs/09-bootstrapping-kubernetes-workers.md)
-* [TLS Bootstrapping the Kubernetes Worker Nodes](docs/10-tls-bootstrapping-kubernetes-workers.md)
-* [Configuring kubectl for Remote Access](docs/11-configuring-kubectl.md)
-* [Deploy Weave - Pod Networking Solution](docs/12-configure-pod-networking.md)
-* [Kube API Server to Kubelet Configuration](docs/13-kube-apiserver-to-kubelet.md)
-* [Deploying the DNS Cluster Add-on](docs/14-dns-addon.md)
-* [Smoke Test](docs/15-smoke-test.md)
-* [E2E Test](docs/16-e2e-tests.md)
-* [Extra - Dynamic Kubelet Configuration](docs/17-extra-dynamic-kubelet-configuration.md)
+* [Installing CRI on Worker Nodes](docs/09-install-cri-workers.md)
+* [Bootstrapping the Kubernetes Worker Nodes](docs/10-bootstrapping-kubernetes-workers.md)
+* [TLS Bootstrapping the Kubernetes Worker Nodes](docs/11-tls-bootstrapping-kubernetes-workers.md)
+* [Configuring kubectl for Remote Access](docs/12-configuring-kubectl.md)
+* [Deploy Weave - Pod Networking Solution](docs/13-configure-pod-networking.md)
+* [Kube API Server to Kubelet Configuration](docs/14-kube-apiserver-to-kubelet.md)
+* [Deploying the DNS Cluster Add-on](docs/15-dns-addon.md)
+* [Smoke Test](docs/16-smoke-test.md)
+* [E2E Test](docs/17-e2e-tests.md)
 * [Extra - Certificate Verification](docs/verify-certificates.md)

deployments/coredns.yaml:

@@ -4,7 +4,7 @@ metadata:
   name: coredns
   namespace: kube-system
 ---
-apiVersion: rbac.authorization.k8s.io/v1beta1
+apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRole
 metadata:
   labels:
@@ -22,7 +22,7 @@ rules:
   - list
   - watch
 ---
-apiVersion: rbac.authorization.k8s.io/v1beta1
+apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRoleBinding
 metadata:
   annotations:
@@ -48,14 +48,19 @@ data:
   Corefile: |
     .:53 {
         errors
-        health
+        health {
+            lameduck 5s
+        }
+        ready
         kubernetes cluster.local in-addr.arpa ip6.arpa {
            pods insecure
-           upstream
           fallthrough in-addr.arpa ip6.arpa
+           ttl 30
        }
        prometheus :9153
-        proxy . /etc/resolv.conf
+        forward . /etc/resolv.conf {
+           max_concurrent 1000
+        }
        cache 30
        loop
        reload
@@ -92,7 +97,7 @@ spec:
        operator: "Exists"
      containers:
      - name: coredns
-        image: coredns/coredns:1.2.2
+        image: coredns/coredns:1.8.6
        imagePullPolicy: IfNotPresent
        resources:
          limits:

docs/01-prerequisites.md:

@@ -10,14 +10,14 @@
 Download and Install [VirtualBox](https://www.virtualbox.org/wiki/Downloads) on any one of the supported platforms:
 
 - Windows hosts
-- OS X hosts
+- OS X hosts (x86 only, not M1)
 - Linux distributions
 - Solaris hosts
 
 ## Vagrant
 
 Once VirtualBox is installed you may choose to deploy virtual machines manually on it.
-Vagrant provides an easier way to deploy multiple virtual machines on VirtualBox more consistenlty.
+Vagrant provides an easier way to deploy multiple virtual machines on VirtualBox more consistently.
 
 Download and Install [Vagrant](https://www.vagrantup.com/) on your platform.
 
@@ -25,4 +25,58 @@ Download and Install [Vagrant](https://www.vagrantup.com/) on your platform.
 - Debian
 - Centos
 - Linux
-- macOS
+- macOS (x86 only, not M1)
+
+This tutorial assumes that you have also installed Vagrant.
+
+## Lab Defaults
+
+The labs have been configured with the following networking defaults. If you change any of these after you have deployed any of the labs, you'll need to completely reset it and start again from the beginning:
+
+```bash
+vagrant destroy -f
+vagrant up
+```
+
+If you do change any of these, please consider that a personal preference and don't submit a PR for it.
+
+### Virtual Machine Network
+
+The network used by the VirtualBox virtual machines is `192.168.56.0/24`.
+
+To change this, edit the [Vagrantfile](../vagrant/Vagrantfile) and set the new value for the network prefix at line 9. This should not overlap any of the other network settings.
+
+Note that you do not need to edit any of the other scripts to make the above change. It is all managed by shell variable computations based on the assigned VM IP addresses and the values in the hosts file (also computed).
+
+It is *recommended* that you leave the pod and service networks with the following defaults. If you change them, then you will also need to edit one or both of the CoreDNS and Weave networking manifests to accommodate your change.
+
+### Pod Network
+
+The network used to assign IP addresses to pods is `10.244.0.0/16`.
+
+To change this, open all the `.md` files in the [docs](../docs/) directory in your favourite IDE and do a global replace on<br>
+`POD_CIDR=10.244.0.0/16`<br>
+with the new CIDR range. This should not overlap any of the other network settings.
+
+### Service Network
+
+The network used to assign IP addresses to Cluster IP services is `10.96.0.0/16`.
+
+To change this, open all the `.md` files in the [docs](../docs/) directory in your favourite IDE and do a global replace on<br>
+`SERVICE_CIDR=10.96.0.0/16`<br>
+with the new CIDR range. This should not overlap any of the other network settings.
+
+Additionally edit line 164 of [coredns.yaml](../deployments/coredns.yaml) to set the new DNS service address (should still end with `.10`)
+
+## Running Commands in Parallel with tmux
+
+[tmux](https://github.com/tmux/tmux/wiki) can be used to run commands on multiple compute instances at the same time. Labs in this tutorial may require running the same commands across multiple compute instances. In those cases, consider using tmux and splitting a window into multiple panes with synchronize-panes enabled to speed up the provisioning process.
+
+> The use of tmux is optional and not required to complete this tutorial.
+
+![tmux screenshot](images/tmux-screenshot.png)
+
+> Enable synchronize-panes by pressing `CTRL+B` followed by `"` to split the window into two panes. In each pane (selectable with mouse), ssh to the host(s) you will be working with.</br>Next type `CTRL+X` at the prompt to begin sync. In sync mode, the dividing line between panes will be red. Everything you type or paste in one pane will be echoed in the other.<br>To disable synchronization type `CTRL+X` again.</br></br>Note that the `CTRL-X` key binding is provided by a `.tmux.conf` loaded onto the VM by the vagrant provisioner.
+
+Next: [Compute Resources](02-compute-resources.md)
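
As a companion to the Service Network note in the hunk above, here is a small shell sketch (assuming the default `SERVICE_CIDR`; the variable name `DNS_SERVICE` is a hypothetical helper, not part of the labs) for deriving the `.10` DNS address, using the same `awk` pattern the certificate lab later uses to derive the `.1` API service address:

```bash
SERVICE_CIDR=10.96.0.0/16
# Take the first three octets of the CIDR and append .10
DNS_SERVICE=$(echo $SERVICE_CIDR | awk 'BEGIN {FS="."} ; { printf("%s.%s.%s.10", $1, $2, $3) }')
echo $DNS_SERVICE   # -> 10.96.0.10
```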
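
The `CTRL+X` binding mentioned in the tmux note above is not a tmux default; a minimal `.tmux.conf` line that would provide such a toggle looks like this (a sketch, the actual file the provisioner loads may differ):

```
# Toggle synchronize-panes on/off with Ctrl-X (no prefix key needed)
bind -n C-x setw synchronize-panes
```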

docs/02-compute-resources.md:

@@ -4,15 +4,21 @@ Note: You must have VirtualBox and Vagrant configured at this point
 
 Download this github repository and cd into the vagrant folder
 
-`git clone https://github.com/mmumshad/kubernetes-the-hard-way.git`
+```bash
+git clone https://github.com/mmumshad/kubernetes-the-hard-way.git
+```
 
 CD into vagrant directory
 
-`cd kubernetes-the-hard-way\vagrant`
+```bash
+cd kubernetes-the-hard-way/vagrant
+```
 
 Run Vagrant up
 
-`vagrant up`
+```bash
+vagrant up
+```
 
 
 This does the below:
@@ -22,26 +28,24 @@ This does the below:
 > If you choose to change these settings, please also update vagrant/ubuntu/vagrant/setup-hosts.sh
 > to add the additional hosts to the /etc/hosts default before running "vagrant up".
 
-- Set's IP addresses in the range 192.168.5
+- Sets IP addresses in the range 192.168.56
 
-| VM           | VM Name                | Purpose       | IP           | Forwarded Port   |
-| ------------ | ---------------------- |:-------------:| ------------:| ----------------:|
-| master-1     | kubernetes-ha-master-1 | Master        | 192.168.5.11 | 2711             |
-| master-2     | kubernetes-ha-master-2 | Master        | 192.168.5.12 | 2712             |
-| worker-1     | kubernetes-ha-worker-1 | Worker        | 192.168.5.21 | 2721             |
-| worker-2     | kubernetes-ha-worker-2 | Worker        | 192.168.5.22 | 2722             |
-| loadbalancer | kubernetes-ha-lb       | LoadBalancer  | 192.168.5.30 | 2730             |
+| VM           | VM Name                | Purpose       | IP            | Forwarded Port   | RAM  |
+| ------------ | ---------------------- |:-------------:| -------------:| ----------------:|-----:|
+| master-1     | kubernetes-ha-master-1 | Master        | 192.168.56.11 | 2711             | 2048 |
+| master-2     | kubernetes-ha-master-2 | Master        | 192.168.56.12 | 2712             | 1024 |
+| worker-1     | kubernetes-ha-worker-1 | Worker        | 192.168.56.21 | 2721             | 512  |
+| worker-2     | kubernetes-ha-worker-2 | Worker        | 192.168.56.22 | 2722             | 1024 |
+| loadbalancer | kubernetes-ha-lb       | LoadBalancer  | 192.168.56.30 | 2730             | 1024 |
 
 > These are the default settings. These can be changed in the Vagrant file
 
 - Adds a DNS entry to each of the nodes to access the internet
 > DNS: 8.8.8.8
 
-- Install's Docker on Worker nodes
-- Runs the below command on all nodes to allow for network forwarding in IP Tables.
-  This is required for kubernetes networking to function correctly.
-> sysctl net.bridge.bridge-nf-call-iptables=1
+- Sets required kernel settings for kubernetes networking to function correctly.
 
+See [Vagrant page](../vagrant/README.md) for details.
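
To confirm those kernel settings on a provisioned node, something like the following can be run (a sketch; `net.bridge.bridge-nf-call-iptables` comes from the old text of this hunk, and `net.ipv4.ip_forward` is the standard companion setting, so treat the exact list as an assumption):

```bash
# Check bridge and forwarding settings required for Kubernetes networking
sysctl net.bridge.bridge-nf-call-iptables net.ipv4.ip_forward
# Expected: both report 1
```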
 
 ## SSH to the nodes
 
@@ -50,7 +54,7 @@ There are two ways to SSH into the nodes:
 ### 1. SSH using Vagrant
 
 From the directory you ran the `vagrant up` command, run `vagrant ssh <vm>` for example `vagrant ssh master-1`.
-> Note: Use VM field from the above table and not the vm name itself.
+> Note: Use the VM field from the above table and not the VM name itself.
 
 ### 2. SSH Using SSH Client Tools
 
@@ -61,30 +65,34 @@ Vagrant generates a private key for each of these VMs. It is placed under the .v
 
 **Private Key Path:** `.vagrant/machines/<machine name>/virtualbox/private_key`
 
-**Username:** `vagrant`
+**Username/Password:** `vagrant/vagrant`
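
For example, to SSH to `master-1` from the host machine using the generated key and the forwarded port from the table above (a sketch; Vagrant forwards each VM's SSH port on 127.0.0.1):

```bash
# Run from the directory containing the Vagrantfile
ssh -i .vagrant/machines/master-1/virtualbox/private_key -p 2711 vagrant@127.0.0.1
```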
 
 
 ## Verify Environment
 
 - Ensure all VMs are up
 - Ensure VMs are assigned the above IP addresses
-- Ensure you can SSH into these VMs using the IP and private keys
+- Ensure you can SSH into these VMs using the IP and private keys, or `vagrant ssh`
 - Ensure the VMs can ping each other
-- Ensure the worker nodes have Docker installed on them. Version: 18.06
-> command `sudo docker version`
 
 ## Troubleshooting Tips
 
-1. If any of the VMs failed to provision, or is not configured correct, delete the vm using the command:
+### Failed Provisioning
 
-`vagrant destroy <vm>`
+If any of the VMs failed to provision, or is not configured correctly, delete the VM using the command:
 
-Then reprovision. Only the missing VMs will be re-provisioned
+```bash
+vagrant destroy <vm>
+```
 
-`vagrant up`
+Then re-provision. Only the missing VMs will be re-provisioned:
 
+```bash
+vagrant up
+```
 
-Sometimes the delete does not delete the folder created for the vm and throws the below error.
+Sometimes the delete does not delete the folder created for the VM and throws an error similar to this:
 
 VirtualBox error:
 
@@ -92,11 +100,38 @@ VirtualBox error:
 VBoxManage.exe: error: Details: code E_FAIL (0x80004005), component SessionMachine, interface IMachine, callee IUnknown
 VBoxManage.exe: error: Context: "SaveSettings()" at line 3105 of file VBoxManageModifyVM.cpp
 
-In such cases delete the VM, then delete the VM folder and then re-provision
+In such cases delete the VM, then delete the VM folder and then re-provision, e.g.
 
-`vagrant destroy <vm>`
-`rmdir "<path-to-vm-folder>\kubernetes-ha-worker-2"`
-`vagrant up`
+```bash
+vagrant destroy worker-2
+rmdir "<path-to-vm-folder>\kubernetes-ha-worker-2"
+vagrant up
+```
+
+### Provisioner gets stuck
+
+This will most likely happen at "Waiting for machine to reboot"
+
+1. Hit `CTRL+C`
+1. Kill any running `ruby` process, or Vagrant will complain.
+1. Destroy the VM that got stuck: `vagrant destroy <vm>`
+1. Re-provision. It will pick up where it left off: `vagrant up`
+
+# Pausing the Environment
+
+You do not need to complete the entire lab in one session. You may shut down and resume the environment as follows, if you need to power off your computer.
+
+To shut down (this will gracefully shut down all the VMs in the reverse order to which they were started):
+
+```
+vagrant halt
+```
+
+To power on again:
+
+```
+vagrant up
+```
+
+Prev: [Prerequisites](01-prerequisites.md)<br>
+Next: [Client tools](03-client-tools.md)

docs/03-client-tools.md:

@@ -2,30 +2,41 @@
 
 First identify a system from where you will perform administrative tasks, such as creating certificates, kubeconfig files and distributing them to the different VMs.
 
-If you are on a Linux laptop, then your laptop could be this system. In my case I chose the master-1 node to perform administrative tasks. Whichever system you chose make sure that system is able to access all the provisioned VMs through SSH to copy files over.
+If you are on a Linux laptop, then your laptop could be this system. In my case I chose the `master-1` node to perform administrative tasks. Whichever system you choose, make sure that system is able to access all the provisioned VMs through SSH to copy files over.
 
 ## Access all VMs
 
-Generate Key Pair on master-1 node
-`$ssh-keygen`
+Here we create an SSH key pair for the `vagrant` user, who we are logged in as. We will copy the public key of this pair to the other master and both workers to permit us to use password-less SSH (and SCP) to get from `master-1` to these other nodes in the context of the `vagrant` user, which exists on all nodes.
+
+Generate a key pair on the `master-1` node:
+
+```bash
+ssh-keygen
+```
 
 Leave all settings to default.
 
 View the generated public key ID at:
 
-```
-$cat .ssh/id_rsa.pub
-ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQD......8+08b vagrant@master-1
-```
+```bash
+cat ~/.ssh/id_rsa.pub
+```
 
-Move public key of master to all other VMs
+Add this key to the local authorized_keys (`master-1`) as in some commands we scp to ourself:
 
-```
-$cat >> ~/.ssh/authorized_keys <<EOF
-ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQD......8+08b vagrant@master-1
-EOF
-```
+```bash
+cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
+```
+
+Copy the output into a notepad and form it into the following command
+
+```bash
+cat >> ~/.ssh/authorized_keys <<EOF
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQD...OUTPUT-FROM-ABOVE-COMMAND...8+08b vagrant@master-1
+EOF
+```
+
+Now ssh to each of the other nodes and paste the above from your notepad at each command prompt.
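
Since password login is enabled for the `vagrant` user, `ssh-copy-id` is an alternative to the notepad step that achieves the same result (a sketch, not part of the lab text):

```bash
# Push the public key directly to each node; enter "vagrant" when prompted
for node in master-2 worker-1 worker-2; do
  ssh-copy-id -i ~/.ssh/id_rsa.pub vagrant@${node}
done
```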
 
 ## Install kubectl
 
@@ -35,30 +46,40 @@ Reference: [https://kubernetes.io/docs/tasks/tools/install-kubectl/](https://kub
 
 ### Linux
 
-```
-wget https://storage.googleapis.com/kubernetes-release/release/v1.13.0/bin/linux/amd64/kubectl
-```
-
-```
+```bash
+wget https://storage.googleapis.com/kubernetes-release/release/v1.24.3/bin/linux/amd64/kubectl
 chmod +x kubectl
-```
-
-```
 sudo mv kubectl /usr/local/bin/
 ```
 
 ### Verification
 
-Verify `kubectl` version 1.13.0 or higher is installed:
+Verify `kubectl` version 1.24.3 or higher is installed:
 
 ```
-kubectl version --client
+kubectl version -o yaml
 ```
 
 > output
 
 ```
-Client Version: version.Info{Major:"1", Minor:"13", GitVersion:"v1.13.0", GitCommit:"ddf47ac13c1a9483ea035a79cd7c10005ff21a6d", GitTreeState:"clean", BuildDate:"2018-12-03T21:04:45Z", GoVersion:"go1.11.2", Compiler:"gc", Platform:"linux/amd64"}
+clientVersion:
+  buildDate: "2022-07-13T14:30:46Z"
+  compiler: gc
+  gitCommit: aef86a93758dc3cb2c658dd9657ab4ad4afc21cb
+  gitTreeState: clean
+  gitVersion: v1.24.3
+  goVersion: go1.18.3
+  major: "1"
+  minor: "24"
+  platform: linux/amd64
+kustomizeVersion: v4.5.4
+
+The connection to the server localhost:8080 was refused - did you specify the right host or port?
 ```
 
+Don't worry about the error at the end as it is expected. We have not set anything up yet!
+
+Prev: [Compute Resources](02-compute-resources.md)<br>
 Next: [Certificate Authority](04-certificate-authority.md)

docs/04-certificate-authority.md:

@@ -8,26 +8,46 @@ You can do these on any machine with `openssl` on it. But you should be able to
 
 In our case we do it on the master-1 node, as we have set it up to be the administrative client.
 
+[//]: # (host:master-1)
+
 ## Certificate Authority
 
 In this section you will provision a Certificate Authority that can be used to generate additional TLS certificates.
 
+Query IPs of hosts we will insert as certificate subject alternative names (SANs), which will be read from `/etc/hosts`. Note that doing this allows us to change the VM network range more easily from the default for these labs, which is `192.168.56.0/24`
+
+```bash
+MASTER_1=$(dig +short master-1)
+MASTER_2=$(dig +short master-2)
+LOADBALANCER=$(dig +short loadbalancer)
+```
+
+Compute the cluster internal API server service address, which is always .1 in the service CIDR range. This is also required as a SAN in the API server certificate:
+
+```bash
+SERVICE_CIDR=10.96.0.0/24
+API_SERVICE=$(echo $SERVICE_CIDR | awk 'BEGIN {FS="."} ; { printf("%s.%s.%s.1", $1, $2, $3) }')
+```
+
 Create a CA certificate, then generate a Certificate Signing Request and use it to create a private key:
 
-```
-# Create private key for CA
-openssl genrsa -out ca.key 2048
+```bash
+{
+# Create private key for CA
+openssl genrsa -out ca.key 2048
 
 # Comment line starting with RANDFILE in /etc/ssl/openssl.cnf definition to avoid permission issues
 sudo sed -i '0,/RANDFILE/{s/RANDFILE/\#&/}' /etc/ssl/openssl.cnf
 
 # Create CSR using the private key
-openssl req -new -key ca.key -subj "/CN=KUBERNETES-CA" -out ca.csr
+openssl req -new -key ca.key -subj "/CN=KUBERNETES-CA/O=Kubernetes" -out ca.csr
 
 # Self sign the csr using its own private key
 openssl x509 -req -in ca.csr -signkey ca.key -CAcreateserial -out ca.crt -days 1000
+}
 ```
 Results:
@@ -36,11 +56,11 @@ ca.crt
 ca.key
 ```
 
-Reference : https://kubernetes.io/docs/concepts/cluster-administration/certificates/#openssl
+Reference : https://kubernetes.io/docs/tasks/administer-cluster/certificates/#openssl
 
 The ca.crt is the Kubernetes Certificate Authority certificate and ca.key is the Kubernetes Certificate Authority private key.
 You will use the ca.crt file in many places, so it will be copied to many places.
-The ca.key is used by the CA for signing certificates. And it should be securely stored. In this case our master node(s) is our CA server as well, so we will store it on master node(s). There is not need to copy this file to elsewhere.
+The ca.key is used by the CA for signing certificates, and it should be securely stored. In this case our master node(s) is our CA server as well, so we will store it on master node(s). There is no need to copy this file elsewhere.
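
As a quick sanity check at this point, the computed variables and the new CA certificate can be inspected (a sketch, assuming the default network range):

```bash
echo $MASTER_1 $MASTER_2 $LOADBALANCER $API_SERVICE
# With the defaults: 192.168.56.11 192.168.56.12 192.168.56.30 10.96.0.1

# Confirm the CA certificate is self-signed with the expected subject
openssl x509 -in ca.crt -noout -subject -issuer
```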
 
 ## Client and Server Certificates
 
@@ -50,15 +70,17 @@ In this section you will generate client and server certificates for each Kubern
 
 Generate the `admin` client certificate and private key:
 
-```
-# Generate private key for admin user
-openssl genrsa -out admin.key 2048
+```bash
+{
+# Generate private key for admin user
+openssl genrsa -out admin.key 2048
 
 # Generate CSR for admin user. Note the OU.
 openssl req -new -key admin.key -subj "/CN=admin/O=system:masters" -out admin.csr
 
 # Sign certificate for admin user using CA server's private key
 openssl x509 -req -in admin.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out admin.crt -days 1000
+}
 ```
 
 Note that the admin user is part of the **system:masters** group. This is how we are able to perform any administrative operations on the Kubernetes cluster using the kubectl utility.
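
The group membership is carried in the certificate's Organization (O) field; it can be confirmed like so (a sketch):

```bash
openssl x509 -in admin.crt -noout -subject
# Expect: subject=CN = admin, O = system:masters
```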
@@ -81,10 +103,16 @@ For now let's just focus on the control plane components.
 
 Generate the `kube-controller-manager` client certificate and private key:
 
-```
-openssl genrsa -out kube-controller-manager.key 2048
-openssl req -new -key kube-controller-manager.key -subj "/CN=system:kube-controller-manager" -out kube-controller-manager.csr
-openssl x509 -req -in kube-controller-manager.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out kube-controller-manager.crt -days 1000
+```bash
+{
+openssl genrsa -out kube-controller-manager.key 2048
+
+openssl req -new -key kube-controller-manager.key \
+  -subj "/CN=system:kube-controller-manager/O=system:kube-controller-manager" -out kube-controller-manager.csr
+
+openssl x509 -req -in kube-controller-manager.csr \
+  -CA ca.crt -CAkey ca.key -CAcreateserial -out kube-controller-manager.crt -days 1000
+}
 ```
 
 Results:
@@ -100,10 +128,16 @@ kube-controller-manager.crt
 
 Generate the `kube-proxy` client certificate and private key:
 
-```
-openssl genrsa -out kube-proxy.key 2048
-openssl req -new -key kube-proxy.key -subj "/CN=system:kube-proxy" -out kube-proxy.csr
-openssl x509 -req -in kube-proxy.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out kube-proxy.crt -days 1000
+```bash
+{
+openssl genrsa -out kube-proxy.key 2048
+
+openssl req -new -key kube-proxy.key \
+  -subj "/CN=system:kube-proxy/O=system:node-proxier" -out kube-proxy.csr
+
+openssl x509 -req -in kube-proxy.csr \
+  -CA ca.crt -CAkey ca.key -CAcreateserial -out kube-proxy.crt -days 1000
+}
 ```
 
 Results:
@@ -119,10 +153,15 @@ Generate the `kube-scheduler` client certificate and private key:
 
 
-```
-openssl genrsa -out kube-scheduler.key 2048
-openssl req -new -key kube-scheduler.key -subj "/CN=system:kube-scheduler" -out kube-scheduler.csr
-openssl x509 -req -in kube-scheduler.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out kube-scheduler.crt -days 1000
+```bash
+{
+openssl genrsa -out kube-scheduler.key 2048
+
+openssl req -new -key kube-scheduler.key \
+  -subj "/CN=system:kube-scheduler/O=system:kube-scheduler" -out kube-scheduler.csr
+
+openssl x509 -req -in kube-scheduler.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out kube-scheduler.crt -days 1000
+}
 ```
 
 Results:
@@ -138,35 +177,43 @@ The kube-apiserver certificate requires all names that various components may re
 
 The `openssl` command cannot take alternate names as command line parameter. So we must create a `conf` file for it:
 
-```
+```bash
 cat > openssl.cnf <<EOF
 [req]
 req_extensions = v3_req
 distinguished_name = req_distinguished_name
 [req_distinguished_name]
-[ v3_req ]
-basicConstraints = CA:FALSE
-keyUsage = nonRepudiation, digitalSignature, keyEncipherment
+[v3_req]
+basicConstraints = critical, CA:FALSE
+keyUsage = critical, nonRepudiation, digitalSignature, keyEncipherment
+extendedKeyUsage = serverAuth
 subjectAltName = @alt_names
 [alt_names]
 DNS.1 = kubernetes
 DNS.2 = kubernetes.default
 DNS.3 = kubernetes.default.svc
-DNS.4 = kubernetes.default.svc.cluster.local
-IP.1 = 10.96.0.1
-IP.2 = 192.168.5.11
-IP.3 = 192.168.5.12
-IP.4 = 192.168.5.30
+DNS.4 = kubernetes.default.svc.cluster
+DNS.5 = kubernetes.default.svc.cluster.local
+IP.1 = ${API_SERVICE}
+IP.2 = ${MASTER_1}
+IP.3 = ${MASTER_2}
+IP.4 = ${LOADBALANCER}
 IP.5 = 127.0.0.1
 EOF
 ```
 
-Generates certs for kube-apiserver
+Generate certs for kube-apiserver
 
-```
-openssl genrsa -out kube-apiserver.key 2048
-openssl req -new -key kube-apiserver.key -subj "/CN=kube-apiserver" -out kube-apiserver.csr -config openssl.cnf
-openssl x509 -req -in kube-apiserver.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out kube-apiserver.crt -extensions v3_req -extfile openssl.cnf -days 1000
+```bash
+{
+openssl genrsa -out kube-apiserver.key 2048
+
+openssl req -new -key kube-apiserver.key \
+  -subj "/CN=kube-apiserver/O=Kubernetes" -out kube-apiserver.csr -config openssl.cnf
+
+openssl x509 -req -in kube-apiserver.csr \
+  -CA ca.crt -CAkey ca.key -CAcreateserial -out kube-apiserver.crt -extensions v3_req -extfile openssl.cnf -days 1000
+}
 ```
 
 Results:
@@ -176,13 +223,52 @@ kube-apiserver.crt
 kube-apiserver.key
 ```
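
To confirm that the variable-driven SANs made it into the API server certificate, the extension block can be printed (a sketch):

```bash
openssl x509 -in kube-apiserver.crt -noout -text | grep -A1 "Subject Alternative Name"
# Expect the DNS names above plus the IPs substituted from API_SERVICE,
# MASTER_1, MASTER_2, LOADBALANCER and 127.0.0.1
```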
 
+# The Kubelet Client Certificate
+
+This certificate is for the api server to authenticate with the kubelets when it requests information from them
+
+```bash
+cat > openssl-kubelet.cnf <<EOF
+[req]
+req_extensions = v3_req
+distinguished_name = req_distinguished_name
+[req_distinguished_name]
+[v3_req]
+basicConstraints = critical, CA:FALSE
+keyUsage = critical, nonRepudiation, digitalSignature, keyEncipherment
+extendedKeyUsage = clientAuth
+EOF
+```
+
+Generate certs for kubelet authentication
+
+```bash
+{
+openssl genrsa -out apiserver-kubelet-client.key 2048
+
+openssl req -new -key apiserver-kubelet-client.key \
+  -subj "/CN=kube-apiserver-kubelet-client/O=system:masters" -out apiserver-kubelet-client.csr -config openssl-kubelet.cnf
+
+openssl x509 -req -in apiserver-kubelet-client.csr \
+  -CA ca.crt -CAkey ca.key -CAcreateserial -out apiserver-kubelet-client.crt -extensions v3_req -extfile openssl-kubelet.cnf -days 1000
+}
+```
+
+Results:
+
+```
+apiserver-kubelet-client.crt
+apiserver-kubelet-client.key
+```
+
 ### The ETCD Server Certificate
 
 Similarly ETCD server certificate must have addresses of all the servers part of the ETCD cluster
 
 The `openssl` command cannot take alternate names as command line parameter. So we must create a `conf` file for it:
 
-```
+```bash
 cat > openssl-etcd.cnf <<EOF
 [req]
 req_extensions = v3_req
@@ -193,18 +279,24 @@ basicConstraints = CA:FALSE
 keyUsage = nonRepudiation, digitalSignature, keyEncipherment
 subjectAltName = @alt_names
 [alt_names]
-IP.1 = 192.168.5.11
-IP.2 = 192.168.5.12
+IP.1 = ${MASTER_1}
+IP.2 = ${MASTER_2}
 IP.3 = 127.0.0.1
 EOF
 ```
 
 Generates certs for ETCD
 
-```
-openssl genrsa -out etcd-server.key 2048
-openssl req -new -key etcd-server.key -subj "/CN=etcd-server" -out etcd-server.csr -config openssl-etcd.cnf
-openssl x509 -req -in etcd-server.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out etcd-server.crt -extensions v3_req -extfile openssl-etcd.cnf -days 1000
+```bash
+{
+openssl genrsa -out etcd-server.key 2048
+
+openssl req -new -key etcd-server.key \
+  -subj "/CN=etcd-server/O=Kubernetes" -out etcd-server.csr -config openssl-etcd.cnf
+
+openssl x509 -req -in etcd-server.csr \
+  -CA ca.crt -CAkey ca.key -CAcreateserial -out etcd-server.crt -extensions v3_req -extfile openssl-etcd.cnf -days 1000
+}
 ```
 
 Results:
@@ -220,10 +312,16 @@ The Kubernetes Controller Manager leverages a key pair to generate and sign serv
 
 Generate the `service-account` certificate and private key:
 
-```
-openssl genrsa -out service-account.key 2048
-openssl req -new -key service-account.key -subj "/CN=service-accounts" -out service-account.csr
-openssl x509 -req -in service-account.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out service-account.crt -days 1000
+```bash
+{
+openssl genrsa -out service-account.key 2048
+
+openssl req -new -key service-account.key \
+  -subj "/CN=service-accounts/O=Kubernetes" -out service-account.csr
+
+openssl x509 -req -in service-account.csr \
+  -CA ca.crt -CAkey ca.key -CAcreateserial -out service-account.crt -days 1000
+}
 ```
 
 Results:
@@ -233,20 +331,51 @@ service-account.key
 service-account.crt
 ```
 
+## Verify the PKI
+
+Run the following, and select option 1 to check all required certificates were generated.
+
+```bash
+./cert_verify.sh
+```
+
+> Expected output
+
+```
+PKI generated correctly!
+```
+
+If there are any errors, please review the above steps and then re-verify
+
 ## Distribute the Certificates
 
-Copy the appropriate certificates and private keys to each controller instance:
+Copy the appropriate certificates and private keys to each instance:
 
-```
+```bash
+{
 for instance in master-1 master-2; do
   scp ca.crt ca.key kube-apiserver.key kube-apiserver.crt \
+    apiserver-kubelet-client.crt apiserver-kubelet-client.key \
     service-account.key service-account.crt \
     etcd-server.key etcd-server.crt \
+    kube-controller-manager.key kube-controller-manager.crt \
+    kube-scheduler.key kube-scheduler.crt \
     ${instance}:~/
 done
+
+for instance in worker-1 worker-2 ; do
+  scp ca.crt kube-proxy.crt kube-proxy.key ${instance}:~/
+done
+}
 ```
 
-> The `kube-proxy`, `kube-controller-manager`, `kube-scheduler`, and `kubelet` client certificates will be used to generate client authentication configuration files in the next lab. These certificates will be embedded into the client authentication configuration files. We will then copy those configuration files to the other master nodes.
+## Optional - Check Certificates
+
+At `master-1` and `master-2` nodes, run the following, selecting option 1
+
+```bash
+./cert_verify.sh
+```
+
+Prev: [Client tools](03-client-tools.md)<br>
 Next: [Generating Kubernetes Configuration Files for Authentication](05-kubernetes-configuration-files.md)
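
In addition to `cert_verify.sh`, every certificate issued in this lab can be spot-checked against the CA directly (a sketch; the list of names is assembled here for illustration):

```bash
for cert in admin kube-controller-manager kube-proxy kube-scheduler \
            kube-apiserver apiserver-kubelet-client etcd-server service-account; do
  openssl verify -CAfile ca.crt ${cert}.crt   # each should print "<name>.crt: OK"
done
```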

docs/05-kubernetes-configuration-files.md:

@@ -1,6 +1,10 @@
 # Generating Kubernetes Configuration Files for Authentication
 
-In this lab you will generate [Kubernetes configuration files](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/), also known as kubeconfigs, which enable Kubernetes clients to locate and authenticate to the Kubernetes API Servers.
+In this lab you will generate [Kubernetes configuration files](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/), also known as "kubeconfigs", which enable Kubernetes clients to locate and authenticate to the Kubernetes API Servers.
+
+Note: It is good practice to use file paths to certificates in kubeconfigs that will be used by the services. When certificates are updated, it is not necessary to regenerate the config files, as you would have to if the certificate data was embedded. Note also that the cert files don't exist in these paths yet - we will place them in later labs.
+
+User configs, like admin.kubeconfig, will have the certificate info embedded within them.
 
 ## Client Authentication Configs
 
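
Once the files exist, the distinction drawn in the note above is easy to see (a sketch): service kubeconfigs reference certificate paths, while user kubeconfigs embed base64 data:

```bash
# Service config: expect client-certificate: /var/lib/kubernetes/pki/kube-proxy.crt
grep 'client-certificate' kube-proxy.kubeconfig
# User config: expect client-certificate-data: <base64 blob>
grep 'client-certificate-data' admin.kubeconfig
```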
@ -8,28 +12,28 @@ In this section you will generate kubeconfig files for the `controller manager`,
|
||||||
|
|
||||||
### Kubernetes Public IP Address
|
### Kubernetes Public IP Address
|
||||||
|
|
||||||
Each kubeconfig requires a Kubernetes API Server to connect to. To support high availability the IP address assigned to the load balancer will be used. In our case it is `192.168.5.30`
|
Each kubeconfig requires a Kubernetes API Server to connect to. To support high availability the IP address assigned to the load balancer will be used, so let's first get the address of the loadbalancer into a shell variable such that we can use it in the kubeconfigs for services that run on worker nodes. The controller manager and scheduler need to talk to the local API server, hence they use the localhost address.
|
||||||
|
|
||||||
```
|
[//]: # (host:master-1)
|
||||||
LOADBALANCER_ADDRESS=192.168.5.30
|
|
||||||
|
```bash
|
||||||
|
LOADBALANCER=$(dig +short loadbalancer)
|
||||||
```
|
```
|
||||||
### The kube-proxy Kubernetes Configuration File

Generate a kubeconfig file for the `kube-proxy` service:

```bash
{
  kubectl config set-cluster kubernetes-the-hard-way \
    --certificate-authority=/var/lib/kubernetes/pki/ca.crt \
    --server=https://${LOADBALANCER}:6443 \
    --kubeconfig=kube-proxy.kubeconfig

  kubectl config set-credentials system:kube-proxy \
    --client-certificate=/var/lib/kubernetes/pki/kube-proxy.crt \
    --client-key=/var/lib/kubernetes/pki/kube-proxy.key \
    --kubeconfig=kube-proxy.kubeconfig

  kubectl config set-context default \

@@ -53,18 +57,16 @@ Reference docs for kube-proxy [here](https://kubernetes.io/docs/reference/comman
Generate a kubeconfig file for the `kube-controller-manager` service:

```bash
{
  kubectl config set-cluster kubernetes-the-hard-way \
    --certificate-authority=/var/lib/kubernetes/pki/ca.crt \
    --server=https://127.0.0.1:6443 \
    --kubeconfig=kube-controller-manager.kubeconfig

  kubectl config set-credentials system:kube-controller-manager \
    --client-certificate=/var/lib/kubernetes/pki/kube-controller-manager.crt \
    --client-key=/var/lib/kubernetes/pki/kube-controller-manager.key \
    --kubeconfig=kube-controller-manager.kubeconfig

  kubectl config set-context default \

@@ -88,18 +90,16 @@ Reference docs for kube-controller-manager [here](https://kubernetes.io/docs/ref
Generate a kubeconfig file for the `kube-scheduler` service:

```bash
{
  kubectl config set-cluster kubernetes-the-hard-way \
    --certificate-authority=/var/lib/kubernetes/pki/ca.crt \
    --server=https://127.0.0.1:6443 \
    --kubeconfig=kube-scheduler.kubeconfig

  kubectl config set-credentials system:kube-scheduler \
    --client-certificate=/var/lib/kubernetes/pki/kube-scheduler.crt \
    --client-key=/var/lib/kubernetes/pki/kube-scheduler.key \
    --kubeconfig=kube-scheduler.kubeconfig

  kubectl config set-context default \

@@ -123,7 +123,7 @@ Reference docs for kube-scheduler [here](https://kubernetes.io/docs/reference/co
Generate a kubeconfig file for the `admin` user:

```bash
{
  kubectl config set-cluster kubernetes-the-hard-way \
    --certificate-authority=ca.crt \

@@ -160,7 +160,7 @@ Reference docs for kubeconfig [here](https://kubernetes.io/docs/tasks/access-app
Copy the appropriate `kube-proxy` kubeconfig files to each worker instance:

```bash
for instance in worker-1 worker-2; do
  scp kube-proxy.kubeconfig ${instance}:~/
done

@@ -168,10 +168,20 @@ done
Copy the appropriate `admin.kubeconfig`, `kube-controller-manager` and `kube-scheduler` kubeconfig files to each controller instance:

```bash
for instance in master-1 master-2; do
  scp admin.kubeconfig kube-controller-manager.kubeconfig kube-scheduler.kubeconfig ${instance}:~/
done
```

## Optional - Check kubeconfigs

At the `master-1` and `master-2` nodes, run the following, selecting option 2:

```bash
./cert_verify.sh
```

Prev: [Certificate Authority](04-certificate-authority.md)<br>
Next: [Generating the Data Encryption Config and Key](06-data-encryption-keys.md)
@@ -1,14 +1,16 @@
# Generating the Data Encryption Config and Key

Kubernetes stores a variety of data including cluster state, application configurations, and secrets. Kubernetes supports the ability to [encrypt](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data) cluster data at rest, that is, the data stored within `etcd`.

In this lab you will generate an encryption key and an [encryption config](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/#understanding-the-encryption-at-rest-configuration) suitable for encrypting Kubernetes Secrets.

## The Encryption Key

[//]: # (host:master-1)

Generate an encryption key:

```bash
ENCRYPTION_KEY=$(head -c 32 /dev/urandom | base64)
```
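As a quick sanity check, the key should decode back to exactly 32 bytes; a minimal verification using standard tools:

```bash
# Should print 32
echo -n "${ENCRYPTION_KEY}" | base64 -d | wc -c
```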
@@ -16,7 +18,7 @@ ENCRYPTION_KEY=$(head -c 32 /dev/urandom | base64)
Create the `encryption-config.yaml` encryption config file:

```bash
cat > encryption-config.yaml <<EOF
kind: EncryptionConfig
apiVersion: v1

@@ -34,7 +36,7 @@ EOF
Copy the `encryption-config.yaml` encryption config file to each controller instance:

```bash
for instance in master-1 master-2; do
  scp encryption-config.yaml ${instance}:~/
done

@@ -42,12 +44,14 @@ done
Move the `encryption-config.yaml` encryption config file to the appropriate directory:

```bash
for instance in master-1 master-2; do
  ssh ${instance} sudo mkdir -p /var/lib/kubernetes/
  ssh ${instance} sudo mv encryption-config.yaml /var/lib/kubernetes/
done
```

Reference: https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/#encrypting-your-data

Prev: [Generating Kubernetes Configuration Files for Authentication](05-kubernetes-configuration-files.md)<br>
Next: [Bootstrapping the etcd Cluster](07-bootstrapping-etcd.md)
@@ -1,6 +1,6 @@
# Bootstrapping the etcd Cluster

Kubernetes components are stateless and store cluster state in [etcd](https://etcd.io/). In this lab you will bootstrap a two-node etcd cluster and configure it for high availability and secure remote access.

## Prerequisites

@@ -14,46 +14,60 @@ The commands in this lab must be run on each controller instance: `master-1`, an
### Download and Install the etcd Binaries

Download the official etcd release binaries from the [etcd](https://github.com/etcd-io/etcd) GitHub project:

[//]: # (host:master-1-master2)

```bash
wget -q --show-progress --https-only --timestamping \
  "https://github.com/coreos/etcd/releases/download/v3.5.3/etcd-v3.5.3-linux-amd64.tar.gz"
```
Extract and install the `etcd` server and the `etcdctl` command line utility:

```bash
{
  tar -xvf etcd-v3.5.3-linux-amd64.tar.gz
  sudo mv etcd-v3.5.3-linux-amd64/etcd* /usr/local/bin/
}
```
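You can confirm the binaries are on the `PATH` and are the expected release before configuring anything:

```bash
# Both should report version 3.5.3
etcd --version
etcdctl version
```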
### Configure the etcd Server

Copy and secure the certificates. Note that we place `ca.crt` in our main PKI directory and symlink to it from etcd so that we don't have multiple copies of the cert lying around.

```bash
{
  sudo mkdir -p /etc/etcd /var/lib/etcd /var/lib/kubernetes/pki
  sudo cp etcd-server.key etcd-server.crt /etc/etcd/
  sudo cp ca.crt /var/lib/kubernetes/pki/
  sudo chown root:root /etc/etcd/*
  sudo chmod 600 /etc/etcd/*
  sudo chown root:root /var/lib/kubernetes/pki/*
  sudo chmod 600 /var/lib/kubernetes/pki/*
  sudo ln -s /var/lib/kubernetes/pki/ca.crt /etc/etcd/ca.crt
}
```
The instance internal IP address will be used to serve client requests and communicate with etcd cluster peers.<br>
Retrieve the internal IP address of the current compute instance, and also the IP addresses of `master-1` and `master-2` for the etcd cluster member list:

```bash
INTERNAL_IP=$(ip addr show enp0s8 | grep "inet " | awk '{print $2}' | cut -d / -f 1)
MASTER_1=$(dig +short master-1)
MASTER_2=$(dig +short master-2)
```
Each etcd member must have a unique name within an etcd cluster. Set the etcd name to match the hostname of the current compute instance:

```bash
ETCD_NAME=$(hostname -s)
```
Create the `etcd.service` systemd unit file:

```bash
cat <<EOF | sudo tee /etc/systemd/system/etcd.service
[Unit]
Description=etcd

@@ -75,7 +89,7 @@ ExecStart=/usr/local/bin/etcd \\
  --listen-client-urls https://${INTERNAL_IP}:2379,https://127.0.0.1:2379 \\
  --advertise-client-urls https://${INTERNAL_IP}:2379 \\
  --initial-cluster-token etcd-cluster-0 \\
  --initial-cluster master-1=https://${MASTER_1}:2380,master-2=https://${MASTER_2}:2380 \\
  --initial-cluster-state new \\
  --data-dir=/var/lib/etcd
Restart=on-failure

@@ -88,7 +102,7 @@ EOF
### Start the etcd Server

```bash
{
  sudo systemctl daemon-reload
  sudo systemctl enable etcd

@@ -100,9 +114,11 @@ EOF
## Verification

[//]: # (sleep:5)

List the etcd cluster members:

```bash
sudo ETCDCTL_API=3 etcdctl member list \
  --endpoints=https://127.0.0.1:2379 \
  --cacert=/etc/etcd/ca.crt \

@@ -113,10 +129,11 @@ sudo ETCDCTL_API=3 etcdctl member list \

> output

```
45bf9ccad8d8900a, started, master-2, https://192.168.56.12:2380, https://192.168.56.12:2379
54a5796a6803f252, started, master-1, https://192.168.56.11:2380, https://192.168.56.11:2379
```
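You can also ask the local member for its health over TLS; a minimal sketch reusing the same CA and the etcd server certificate pair:

```bash
# Should report the 127.0.0.1 endpoint as healthy
sudo ETCDCTL_API=3 etcdctl endpoint health \
  --endpoints=https://127.0.0.1:2379 \
  --cacert=/etc/etcd/ca.crt \
  --cert=/etc/etcd/etcd-server.crt \
  --key=/etc/etcd/etcd-server.key
```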
Reference: https://kubernetes.io/docs/tasks/administer-cluster/configure-upgrade-etcd/#starting-etcd-clusters

Prev: [Generating the Data Encryption Config and Key](06-data-encryption-keys.md)<br>
Next: [Bootstrapping the Kubernetes Control Plane](08-bootstrapping-kubernetes-controllers.md)
@@ -2,39 +2,35 @@
In this lab you will bootstrap the Kubernetes control plane across 2 compute instances and configure it for high availability. You will also create an external load balancer that exposes the Kubernetes API Servers to remote clients. The following components will be installed on each node: Kubernetes API Server, Scheduler, and Controller Manager.

Note that in a production-ready cluster it is recommended to have an odd number of master nodes, as leader election and quorum work better for multi-node services like etcd. See the lecture on this ([KodeKloud](https://kodekloud.com/topic/etcd-in-ha/), [Udemy](https://www.udemy.com/course/certified-kubernetes-administrator-with-practice-tests/learn/lecture/14296192#overview)). We're only using two here to save RAM on your workstation.

## Prerequisites

The commands in this lab, up to the load balancer configuration, must be run on each controller instance: `master-1` and `master-2`. Login to each controller instance using SSH Terminal.

You can perform this step with [tmux](01-prerequisites.md#running-commands-in-parallel-with-tmux).

## Provision the Kubernetes Control Plane

[//]: # (host:master-1-master2)
### Download and Install the Kubernetes Controller Binaries

Download the official Kubernetes release binaries:

```bash
wget -q --show-progress --https-only --timestamping \
  "https://storage.googleapis.com/kubernetes-release/release/v1.24.3/bin/linux/amd64/kube-apiserver" \
  "https://storage.googleapis.com/kubernetes-release/release/v1.24.3/bin/linux/amd64/kube-controller-manager" \
  "https://storage.googleapis.com/kubernetes-release/release/v1.24.3/bin/linux/amd64/kube-scheduler" \
  "https://storage.googleapis.com/kubernetes-release/release/v1.24.3/bin/linux/amd64/kubectl"
```
Reference: https://kubernetes.io/releases/download/#binaries

Install the Kubernetes binaries:

```bash
{
  chmod +x kube-apiserver kube-controller-manager kube-scheduler kubectl
  sudo mv kube-apiserver kube-controller-manager kube-scheduler kubectl /usr/local/bin/

@@ -43,32 +39,48 @@ Install the Kubernetes binaries:
### Configure the Kubernetes API Server

Place the key pairs into the Kubernetes data directory and secure them:

```bash
{
  sudo mkdir -p /var/lib/kubernetes/pki

  # Only copy CA keys as we'll need them again for workers.
  sudo cp ca.crt ca.key /var/lib/kubernetes/pki
  for c in kube-apiserver service-account apiserver-kubelet-client etcd-server kube-scheduler kube-controller-manager
  do
    sudo mv "$c.crt" "$c.key" /var/lib/kubernetes/pki/
  done
  sudo chown root:root /var/lib/kubernetes/pki/*
  sudo chmod 600 /var/lib/kubernetes/pki/*
}
```
The instance internal IP address will be used to advertise the API Server to members of the cluster. The load balancer IP address will be used as the external endpoint to the API servers.<br>
Retrieve these internal IP addresses:

```bash
INTERNAL_IP=$(ip addr show enp0s8 | grep "inet " | awk '{print $2}' | cut -d / -f 1)
LOADBALANCER=$(dig +short loadbalancer)
```

Get the IP addresses of the two master nodes, where the etcd servers are:

```bash
MASTER_1=$(dig +short master-1)
MASTER_2=$(dig +short master-2)
```
CIDR ranges used *within* the cluster:

```bash
POD_CIDR=10.244.0.0/16
SERVICE_CIDR=10.96.0.0/16
```
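As an aside, the in-cluster `kubernetes` service is conventionally allocated the first usable address of this service range; a quick illustration of what that works out to:

```bash
# Prints 10.96.0.1 for SERVICE_CIDR=10.96.0.0/16
echo $SERVICE_CIDR | awk -F'[./]' '{ printf "%s.%s.%s.1\n", $1, $2, $3 }'
```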
Create the `kube-apiserver.service` systemd unit file:

```bash
cat <<EOF | sudo tee /etc/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Server

@@ -85,26 +97,26 @@ ExecStart=/usr/local/bin/kube-apiserver \\
  --audit-log-path=/var/log/audit.log \\
  --authorization-mode=Node,RBAC \\
  --bind-address=0.0.0.0 \\
  --client-ca-file=/var/lib/kubernetes/pki/ca.crt \\
  --enable-admission-plugins=NodeRestriction,ServiceAccount \\
  --enable-bootstrap-token-auth=true \\
  --etcd-cafile=/var/lib/kubernetes/pki/ca.crt \\
  --etcd-certfile=/var/lib/kubernetes/pki/etcd-server.crt \\
  --etcd-keyfile=/var/lib/kubernetes/pki/etcd-server.key \\
  --etcd-servers=https://${MASTER_1}:2379,https://${MASTER_2}:2379 \\
  --event-ttl=1h \\
  --encryption-provider-config=/var/lib/kubernetes/encryption-config.yaml \\
  --kubelet-certificate-authority=/var/lib/kubernetes/pki/ca.crt \\
  --kubelet-client-certificate=/var/lib/kubernetes/pki/apiserver-kubelet-client.crt \\
  --kubelet-client-key=/var/lib/kubernetes/pki/apiserver-kubelet-client.key \\
  --runtime-config=api/all=true \\
  --service-account-key-file=/var/lib/kubernetes/pki/service-account.crt \\
  --service-account-signing-key-file=/var/lib/kubernetes/pki/service-account.key \\
  --service-account-issuer=https://${LOADBALANCER}:6443 \\
  --service-cluster-ip-range=${SERVICE_CIDR} \\
  --service-node-port-range=30000-32767 \\
  --tls-cert-file=/var/lib/kubernetes/pki/kube-apiserver.crt \\
  --tls-private-key-file=/var/lib/kubernetes/pki/kube-apiserver.key \\
  --v=2
Restart=on-failure
RestartSec=5

@@ -116,15 +128,15 @@ EOF
### Configure the Kubernetes Controller Manager

Move the `kube-controller-manager` kubeconfig into place:

```bash
sudo mv kube-controller-manager.kubeconfig /var/lib/kubernetes/
```

Create the `kube-controller-manager.service` systemd unit file:

```bash
cat <<EOF | sudo tee /etc/systemd/system/kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager

@@ -132,16 +144,23 @@ Documentation=https://github.com/kubernetes/kubernetes

[Service]
ExecStart=/usr/local/bin/kube-controller-manager \\
  --allocate-node-cidrs=true \\
  --authentication-kubeconfig=/var/lib/kubernetes/kube-controller-manager.kubeconfig \\
  --authorization-kubeconfig=/var/lib/kubernetes/kube-controller-manager.kubeconfig \\
  --bind-address=127.0.0.1 \\
  --client-ca-file=/var/lib/kubernetes/pki/ca.crt \\
  --cluster-cidr=${POD_CIDR} \\
  --cluster-name=kubernetes \\
  --cluster-signing-cert-file=/var/lib/kubernetes/pki/ca.crt \\
  --cluster-signing-key-file=/var/lib/kubernetes/pki/ca.key \\
  --controllers=*,bootstrapsigner,tokencleaner \\
  --kubeconfig=/var/lib/kubernetes/kube-controller-manager.kubeconfig \\
  --leader-elect=true \\
  --node-cidr-mask-size=24 \\
  --requestheader-client-ca-file=/var/lib/kubernetes/pki/ca.crt \\
  --root-ca-file=/var/lib/kubernetes/pki/ca.crt \\
  --service-account-private-key-file=/var/lib/kubernetes/pki/service-account.key \\
  --service-cluster-ip-range=${SERVICE_CIDR} \\
  --use-service-account-credentials=true \\
  --v=2
Restart=on-failure

@@ -154,15 +173,15 @@ EOF
### Configure the Kubernetes Scheduler

Move the `kube-scheduler` kubeconfig into place:

```bash
sudo mv kube-scheduler.kubeconfig /var/lib/kubernetes/
```

Create the `kube-scheduler.service` systemd unit file:

```bash
cat <<EOF | sudo tee /etc/systemd/system/kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler

@@ -171,7 +190,6 @@ Documentation=https://github.com/kubernetes/kubernetes

[Service]
ExecStart=/usr/local/bin/kube-scheduler \\
  --kubeconfig=/var/lib/kubernetes/kube-scheduler.kubeconfig \\
  --leader-elect=true \\
  --v=2
Restart=on-failure

@@ -182,9 +200,24 @@ WantedBy=multi-user.target
EOF
```
## Secure kubeconfigs

```bash
sudo chmod 600 /var/lib/kubernetes/*.kubeconfig
```
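To confirm the permissions took effect, a small check using GNU `stat`:

```bash
# Each file should show mode 600
stat -c '%a %n' /var/lib/kubernetes/*.kubeconfig
```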
## Optional - Check Certificates and kubeconfigs

At the `master-1` and `master-2` nodes, run the following, selecting option 3:

```bash
./cert_verify.sh
```
### Start the Controller Services

```bash
{
  sudo systemctl daemon-reload
  sudo systemctl enable kube-apiserver kube-controller-manager kube-scheduler

@@ -197,11 +230,18 @@ EOF
### Verification

[//]: # (sleep:10)

```bash
kubectl get componentstatuses --kubeconfig admin.kubeconfig
```

It will give you a deprecation warning here, but that's OK.

> Output

```
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME                 STATUS    MESSAGE   ERROR
controller-manager   Healthy   ok
scheduler            Healthy   ok

@@ -218,16 +258,31 @@ In this section you will provision an external load balancer to front the Kubern
### Provision a Network Load Balancer

An NLB operates at [layer 4](https://en.wikipedia.org/wiki/OSI_model#Layer_4:_Transport_layer) (TCP), meaning it passes traffic straight through to the back-end servers unfettered and does not interfere with the TLS process, leaving this to the Kube API servers.

Login to the `loadbalancer` instance using SSH Terminal.

[//]: # (host:loadbalancer)

```bash
sudo apt-get update && sudo apt-get install -y haproxy
```

Read the IP addresses of the master nodes and this host into shell variables:

```bash
MASTER_1=$(dig +short master-1)
MASTER_2=$(dig +short master-2)
LOADBALANCER=$(dig +short loadbalancer)
```
Create the HAProxy configuration to listen on the API server port on this host and distribute requests evenly to the two master nodes:

```bash
cat <<EOF | sudo tee /etc/haproxy/haproxy.cfg
frontend kubernetes
    bind ${LOADBALANCER}:6443
    option tcplog
    mode tcp
    default_backend kubernetes-master-nodes

@@ -236,21 +291,23 @@ backend kubernetes-master-nodes
    mode tcp
    balance roundrobin
    option tcp-check
    server master-1 ${MASTER_1}:6443 check fall 3 rise 2
    server master-2 ${MASTER_2}:6443 check fall 3 rise 2
EOF
```

```bash
sudo systemctl restart haproxy
```
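If the restart fails, HAProxy can validate the configuration file directly and report the offending line; a minimal check:

```bash
# Exits non-zero and prints an error if the config is invalid
sudo haproxy -c -f /etc/haproxy/haproxy.cfg
```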
### Verification

[//]: # (sleep:2)

Make an HTTP request for the Kubernetes version info:

```bash
curl https://${LOADBALANCER}:6443/version -k
```

> output

@@ -258,15 +315,16 @@ curl https://192.168.5.30:6443/version -k

```
{
  "major": "1",
  "minor": "24",
  "gitVersion": "v1.24.3",
  "gitCommit": "aef86a93758dc3cb2c658dd9657ab4ad4afc21cb",
  "gitTreeState": "clean",
  "buildDate": "2022-07-13T14:23:26Z",
  "goVersion": "go1.18.3",
  "compiler": "gc",
  "platform": "linux/amd64"
}
```

Prev: [Bootstrapping the etcd Cluster](07-bootstrapping-etcd.md)<br>
Next: [Installing CRI on the Kubernetes Worker Nodes](09-install-cri-workers.md)
@@ -0,0 +1,81 @@
# Installing CRI on the Kubernetes Worker Nodes

In this lab you will install the Container Runtime Interface (CRI) on both worker nodes. CRI is a standard interface for the management of containers. Since v1.24 the use of dockershim has been fully deprecated and removed from the code base. [containerd replaces docker](https://kodekloud.com/blog/kubernetes-removed-docker-what-happens-now/) as the container runtime for Kubernetes, and it requires support from [CNI Plugins](https://github.com/containernetworking/plugins) to configure container networks, and [runc](https://github.com/opencontainers/runc) to actually do the job of running containers.

Reference: https://github.com/containerd/containerd/blob/main/docs/getting-started.md

### Download and Install Container Networking

The commands in this lab must be run on each worker instance: `worker-1`, and `worker-2`. Login to each worker instance using SSH Terminal.

[//]: # (host:worker-1-worker-2)

You can perform this step with [tmux](01-prerequisites.md#running-commands-in-parallel-with-tmux)

The versions chosen here align with those that are installed by the current `kubernetes-cni` package for a v1.24 cluster.

```bash
{
  CONTAINERD_VERSION=1.5.9
  CNI_VERSION=0.8.6
  RUNC_VERSION=1.1.1

  wget -q --show-progress --https-only --timestamping \
    https://github.com/containerd/containerd/releases/download/v${CONTAINERD_VERSION}/containerd-${CONTAINERD_VERSION}-linux-amd64.tar.gz \
    https://github.com/containernetworking/plugins/releases/download/v${CNI_VERSION}/cni-plugins-linux-amd64-v${CNI_VERSION}.tgz \
    https://github.com/opencontainers/runc/releases/download/v${RUNC_VERSION}/runc.amd64

  sudo mkdir -p /opt/cni/bin

  sudo chmod +x runc.amd64
  sudo mv runc.amd64 /usr/local/bin/runc

  sudo tar -xzvf containerd-${CONTAINERD_VERSION}-linux-amd64.tar.gz -C /usr/local
  sudo tar -xzvf cni-plugins-linux-amd64-v${CNI_VERSION}.tgz -C /opt/cni/bin
}
```

Next create the `containerd` service unit.

```bash
cat <<EOF | sudo tee /etc/systemd/system/containerd.service
[Unit]
Description=containerd container runtime
Documentation=https://containerd.io
After=network.target local-fs.target

[Service]
ExecStartPre=-/sbin/modprobe overlay
ExecStart=/usr/local/bin/containerd

Type=notify
Delegate=yes
KillMode=process
Restart=always
RestartSec=5
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNPROC=infinity
LimitCORE=infinity
LimitNOFILE=infinity
# Comment TasksMax out if your systemd version does not support it.
# Only systemd 226 and above support this option.
TasksMax=infinity
OOMScoreAdjust=-999

[Install]
WantedBy=multi-user.target
EOF
```

Now start it:

```bash
{
  sudo systemctl enable containerd
  sudo systemctl start containerd
}
```
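Before moving on, a quick check that the runtime is actually up (the `ctr` client was installed from the containerd tarball above):

```bash
# Should print "active", then the client/server version banner
systemctl is-active containerd
sudo ctr version
```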
Prev: [Bootstrapping the Kubernetes Control Plane](08-bootstrapping-kubernetes-controllers.md)<br>
Next: [Bootstrapping the Kubernetes Worker Nodes](10-bootstrapping-kubernetes-workers.md)
@@ -1,6 +1,6 @@
# Bootstrapping the Kubernetes Worker Nodes

In this lab you will bootstrap 2 Kubernetes worker nodes. We already installed `containerd` and its dependencies on these nodes in the previous lab.

We will now install the Kubernetes components:
- [kubelet](https://kubernetes.io/docs/admin/kubelet)

@@ -8,7 +8,7 @@ We will now install the kubernetes components

## Prerequisites

The Certificates and Configuration are created on the `master-1` node and then copied over to the workers using `scp`.
Once this is done, the commands are to be run on the first worker instance: `worker-1`. Login to the first worker instance using SSH Terminal.

### Provisioning Kubelet Client Certificates

@@ -17,9 +17,15 @@ Kubernetes uses a [special-purpose authorization mode](https://kubernetes.io/doc
Generate a certificate and private key for one worker node:

On `master-1`:

[//]: # (host:master-1)

```bash
WORKER_1=$(dig +short worker-1)
```

```bash
cat > openssl-worker-1.cnf <<EOF
[req]
req_extensions = v3_req

@@ -31,7 +37,7 @@ keyUsage = nonRepudiation, digitalSignature, keyEncipherment
subjectAltName = @alt_names
[alt_names]
DNS.1 = worker-1
IP.1 = ${WORKER_1}
EOF

openssl genrsa -out worker-1.key 2048

@@ -51,25 +57,24 @@ worker-1.crt
When generating kubeconfig files for Kubelets, the client certificate matching the Kubelet's node name must be used. This will ensure Kubelets are properly authorized by the Kubernetes [Node Authorizer](https://kubernetes.io/docs/admin/authorization/node/).

Get the kube-api server load-balancer IP:

```bash
LOADBALANCER=$(dig +short loadbalancer)
```

Generate a kubeconfig file for the first worker node.

On `master-1`:

```bash
{
  kubectl config set-cluster kubernetes-the-hard-way \
    --certificate-authority=/var/lib/kubernetes/pki/ca.crt \
    --server=https://${LOADBALANCER}:6443 \
    --kubeconfig=worker-1.kubeconfig

  kubectl config set-credentials system:node:worker-1 \
    --client-certificate=/var/lib/kubernetes/pki/worker-1.crt \
    --client-key=/var/lib/kubernetes/pki/worker-1.key \
    --kubeconfig=worker-1.kubeconfig

  kubectl config set-context default \

@@ -88,40 +93,42 @@ worker-1.kubeconfig
```
### Copy certificates, private keys and kubeconfig files to the worker node:

On `master-1`:

```bash
scp ca.crt worker-1.crt worker-1.key worker-1.kubeconfig worker-1:~/
```

### Download and Install Worker Binaries

All the following commands from here until the [verification](#verification) step must be run on `worker-1`.

[//]: # (host:worker-1)

```bash
wget -q --show-progress --https-only --timestamping \
  https://storage.googleapis.com/kubernetes-release/release/v1.24.3/bin/linux/amd64/kubectl \
  https://storage.googleapis.com/kubernetes-release/release/v1.24.3/bin/linux/amd64/kube-proxy \
  https://storage.googleapis.com/kubernetes-release/release/v1.24.3/bin/linux/amd64/kubelet
```

Reference: https://kubernetes.io/releases/download/#binaries
Create the installation directories:

```bash
sudo mkdir -p \
  /var/lib/kubelet \
  /var/lib/kube-proxy \
  /var/lib/kubernetes/pki \
  /var/run/kubernetes
```

Install the worker binaries:

```bash
{
  chmod +x kubectl kube-proxy kubelet
  sudo mv kubectl kube-proxy kubelet /usr/local/bin/

@@ -130,18 +137,39 @@ Install the worker binaries:
### Configure the Kubelet

On `worker-1`:

Copy the keys and config into the correct directories and secure them:

```bash
{
  sudo mv ${HOSTNAME}.key ${HOSTNAME}.crt /var/lib/kubernetes/pki/
  sudo mv ${HOSTNAME}.kubeconfig /var/lib/kubelet/kubelet.kubeconfig
  sudo mv ca.crt /var/lib/kubernetes/pki/
  sudo mv kube-proxy.crt kube-proxy.key /var/lib/kubernetes/pki/
  sudo chown root:root /var/lib/kubernetes/pki/*
  sudo chmod 600 /var/lib/kubernetes/pki/*
  sudo chown root:root /var/lib/kubelet/*
  sudo chmod 600 /var/lib/kubelet/*
}
```
CIDR ranges used *within* the cluster:

```bash
POD_CIDR=10.244.0.0/16
SERVICE_CIDR=10.96.0.0/16
```
Compute the cluster DNS address, which is conventionally .10 in the service CIDR range:

```bash
CLUSTER_DNS=$(echo $SERVICE_CIDR | awk 'BEGIN {FS="."} ; { printf("%s.%s.%s.10", $1, $2, $3) }')
```
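A quick check of the computed value; with the service range defined above this should print the conventional cluster DNS address:

```bash
# Expect 10.96.0.10 for SERVICE_CIDR=10.96.0.0/16
echo $CLUSTER_DNS
```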
Create the `kubelet-config.yaml` configuration file:

```bash
cat <<EOF | sudo tee /var/lib/kubelet/kubelet-config.yaml
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
authentication:

@@ -150,14 +178,17 @@ authentication:
  webhook:
    enabled: true
  x509:
    clientCAFile: /var/lib/kubernetes/pki/ca.crt
authorization:
  mode: Webhook
clusterDomain: cluster.local
clusterDNS:
  - ${CLUSTER_DNS}
resolvConf: /run/systemd/resolve/resolv.conf
runtimeRequestTimeout: "15m"
tlsCertFile: /var/lib/kubernetes/pki/${HOSTNAME}.crt
tlsPrivateKeyFile: /var/lib/kubernetes/pki/${HOSTNAME}.key
registerNode: true
EOF
```

@@ -165,23 +196,19 @@ EOF
Create the `kubelet.service` systemd unit file:

```bash
cat <<EOF | sudo tee /etc/systemd/system/kubelet.service
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/kubernetes/kubernetes
After=containerd.service
Requires=containerd.service

[Service]
ExecStart=/usr/local/bin/kubelet \\
  --config=/var/lib/kubelet/kubelet-config.yaml \\
  --container-runtime-endpoint=unix:///var/run/containerd/containerd.sock \\
  --kubeconfig=/var/lib/kubelet/kubelet.kubeconfig \\
  --v=2
Restart=on-failure
RestartSec=5

@@ -193,27 +220,28 @@ EOF
### Configure the Kubernetes Proxy

On `worker-1`:

```bash
sudo mv kube-proxy.kubeconfig /var/lib/kube-proxy/
```

Create the `kube-proxy-config.yaml` configuration file:

```bash
cat <<EOF | sudo tee /var/lib/kube-proxy/kube-proxy-config.yaml
kind: KubeProxyConfiguration
apiVersion: kubeproxy.config.k8s.io/v1alpha1
clientConnection:
  kubeconfig: "/var/lib/kube-proxy/kube-proxy.kubeconfig"
mode: "iptables"
clusterCIDR: ${POD_CIDR}
EOF
```

Create the `kube-proxy.service` systemd unit file:

```bash
cat <<EOF | sudo tee /etc/systemd/system/kube-proxy.service
[Unit]
Description=Kubernetes Kube Proxy
Documentation=https://github.com/kubernetes/kubernetes
@@ -229,9 +257,18 @@ WantedBy=multi-user.target
EOF
```

## Optional - Check Certificates and kubeconfigs

At the `worker-1` node, run the following, selecting option 4:

```bash
./cert_verify.sh
```
### Start the Worker Services

On `worker-1`:

```bash
{
  sudo systemctl daemon-reload
  sudo systemctl enable kubelet kube-proxy

@@ -242,24 +279,25 @@ On worker-1:

> Remember to run the above commands on worker node: `worker-1`

## Verification

[//]: # (host:master-1)

Now return to the `master-1` node.

List the registered Kubernetes nodes from the master node:

```bash
kubectl get nodes --kubeconfig admin.kubeconfig
```

> output

```
NAME       STATUS     ROLES    AGE   VERSION
worker-1   NotReady   <none>   93s   v1.24.3
```

The node is not ready as we have not yet installed pod networking. This comes later.

Prev: [Installing CRI on the Kubernetes Worker Nodes](09-install-cri-workers.md)<br>
Next: [TLS Bootstrapping Kubernetes Workers](11-tls-bootstrapping-kubernetes-workers.md)
@@ -18,7 +18,7 @@ This is not a practical approach when you have 1000s of nodes in the cluster, an

In Kubernetes 1.11 a patch was merged to require administrator or Controller approval of node serving CSRs for security reasons.

Reference: https://kubernetes.io/docs/reference/access-authn-authz/kubelet-tls-bootstrapping/#certificate-rotation

So let's get started!

@@ -39,58 +39,26 @@ So let's get started!
  --cluster-signing-key-file=/var/lib/kubernetes/ca.key
```

> Note: We have already configured these in lab 8 of this course
# Step 1 Create the Bootstrap Token to be used by Nodes (Kubelets) to invoke the Certificate API

[//]: # (host:master-1)

Run the following steps on `master-1`.
For the workers(kubelet) to access the Certificates API, they need to authenticate to the kubernetes api-server first. For this we create a [Bootstrap Token](https://kubernetes.io/docs/reference/access-authn-authz/bootstrap-tokens/) to be used by the kubelet
|
For the workers(kubelet) to access the Certificates API, they need to authenticate to the kubernetes api-server first. For this we create a [Bootstrap Token](https://kubernetes.io/docs/reference/access-authn-authz/bootstrap-tokens/) to be used by the kubelet
|
||||||
|
|
||||||
Bootstrap Tokens take the form of a 6 character token id followed by 16 character token secret separated by a dot. Eg: abcdef.0123456789abcdef. More formally, they must match the regular expression [a-z0-9]{6}\.[a-z0-9]{16}
|
Bootstrap Tokens take the form of a 6 character token id followed by 16 character token secret separated by a dot. Eg: abcdef.0123456789abcdef. More formally, they must match the regular expression [a-z0-9]{6}\.[a-z0-9]{16}
|
||||||
|
|
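This lab uses the fixed token `07401b.f395accd246ae52d` so the remaining steps are reproducible. If you would rather generate your own compliant token, a minimal sketch using `openssl` (assuming it is installed, as it is on the lab VMs):

```
# 3 random bytes -> 6 hex characters; 8 random bytes -> 16 hex characters.
# Hex output [0-9a-f] is a subset of the required [a-z0-9] alphabet.
TOKEN_ID=$(openssl rand -hex 3)
TOKEN_SECRET=$(openssl rand -hex 8)
echo "${TOKEN_ID}.${TOKEN_SECRET}"
```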
Set an expiration date for the bootstrap token of 7 days from now (you can adjust this):

```bash
EXPIRATION=$(date -u --date "+7 days" +"%Y-%m-%dT%H:%M:%SZ")
```

```bash
cat > bootstrap-token-07401b.yaml <<EOF
apiVersion: v1
kind: Secret
metadata:
@@ -109,7 +77,7 @@ stringData:
  token-secret: f395accd246ae52d

  # Expiration. Optional.
  expiration: ${EXPIRATION}

  # Allowed usages.
  usage-bootstrap-authentication: "true"
@@ -120,12 +88,12 @@ stringData:
EOF

kubectl create -f bootstrap-token-07401b.yaml --kubeconfig admin.kubeconfig
```
Things to note:
- **expiration** - make sure it's set to a date in the future. The computed shell variable `EXPIRATION` ensures this.
- **auth-extra-groups** - this is the group the worker nodes are part of. It must start with `system:bootstrappers:`. This group does not already exist, and is associated with this token.

Once this is created, the token to be used for authentication is `07401b.f395accd246ae52d`.
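As a quick sanity check (not part of the original lab), you can confirm the secret landed where expected; bootstrap token secrets live in the `kube-system` namespace:

```
kubectl get secret bootstrap-token-07401b -n kube-system --kubeconfig admin.kubeconfig
```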
@@ -136,12 +104,16 @@ Reference: https://kubernetes.io/docs/reference/access-authn-authz/bootstrap-tok

Next we associate the group we created before with the system:node-bootstrapper ClusterRole. This ClusterRole gives the group enough permissions to bootstrap the kubelet.

```bash
kubectl create clusterrolebinding create-csrs-for-bootstrapping \
  --clusterrole=system:node-bootstrapper \
  --group=system:bootstrappers \
  --kubeconfig admin.kubeconfig
```
--------------- OR ---------------

```bash
cat > csrs-for-bootstrapping.yaml <<EOF
# enable bootstrapping nodes to create CSR
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
@@ -158,18 +130,24 @@ roleRef:
EOF

kubectl create -f csrs-for-bootstrapping.yaml --kubeconfig admin.kubeconfig
```

Reference: https://kubernetes.io/docs/reference/access-authn-authz/kubelet-tls-bootstrapping/#authorize-kubelet-to-create-csr
## Step 3 Authorize workers (kubelets) to approve CSRs

```bash
kubectl create clusterrolebinding auto-approve-csrs-for-group \
  --clusterrole=system:certificates.k8s.io:certificatesigningrequests:nodeclient \
  --group=system:bootstrappers \
  --kubeconfig admin.kubeconfig
```
--------------- OR ---------------

```bash
cat > auto-approve-csrs-for-group.yaml <<EOF
# Approve all CSRs for the group "system:bootstrappers"
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
@@ -185,22 +163,26 @@ roleRef:
  apiGroup: rbac.authorization.k8s.io
EOF

kubectl create -f auto-approve-csrs-for-group.yaml --kubeconfig admin.kubeconfig
```

Reference: https://kubernetes.io/docs/reference/access-authn-authz/kubelet-tls-bootstrapping/#approval
## Step 4 Authorize workers (kubelets) to Auto Renew Certificates on expiration

We now create the Cluster Role Binding required for the nodes to automatically renew their certificates on expiry. Note that we are NOT using the **system:bootstrappers** group here any more, since by the renewal period the node should already be bootstrapped and part of the cluster. All nodes are part of the **system:nodes** group.

```bash
kubectl create clusterrolebinding auto-approve-renewals-for-nodes \
  --clusterrole=system:certificates.k8s.io:certificatesigningrequests:selfnodeclient \
  --group=system:nodes \
  --kubeconfig admin.kubeconfig
```
--------------- OR ---------------

```bash
cat > auto-approve-renewals-for-nodes.yaml <<EOF
# Approve renewal CSRs for the group "system:nodes"
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
@@ -217,36 +199,101 @@ roleRef:
EOF

kubectl create -f auto-approve-renewals-for-nodes.yaml --kubeconfig admin.kubeconfig
```

Reference: https://kubernetes.io/docs/reference/access-authn-authz/kubelet-tls-bootstrapping/#approval
## Step 5 Configure the Binaries on the Worker node

Going forward all activities are to be done on the `worker-2` node until [step 11](#step-11-approve-server-csr).

[//]: # (host:worker-2)

### Download and Install Worker Binaries
```bash
wget -q --show-progress --https-only --timestamping \
  https://storage.googleapis.com/kubernetes-release/release/v1.24.3/bin/linux/amd64/kubectl \
  https://storage.googleapis.com/kubernetes-release/release/v1.24.3/bin/linux/amd64/kube-proxy \
  https://storage.googleapis.com/kubernetes-release/release/v1.24.3/bin/linux/amd64/kubelet
```

Reference: https://kubernetes.io/releases/download/#binaries

Create the installation directories:

```bash
sudo mkdir -p \
  /var/lib/kubelet/pki \
  /var/lib/kube-proxy \
  /var/lib/kubernetes/pki \
  /var/run/kubernetes
```
Install the worker binaries:

```bash
{
  chmod +x kubectl kube-proxy kubelet
  sudo mv kubectl kube-proxy kubelet /usr/local/bin/
}
```

Move the certificates into place and secure them:

```bash
{
  sudo mv ca.crt kube-proxy.crt kube-proxy.key /var/lib/kubernetes/pki
  sudo chown root:root /var/lib/kubernetes/pki/*
  sudo chmod 600 /var/lib/kubernetes/pki/*
}
```
## Step 6 Configure Kubelet to TLS Bootstrap

It is now time to configure the second worker to TLS bootstrap using the token we generated.

For worker-1 we started by creating a kubeconfig file with the TLS certificates that we manually generated. Here, we don't have the certificates yet, so we cannot create a kubeconfig file. Instead we create a bootstrap-kubeconfig file containing the token we created.

This is to be done on the `worker-2` node. Note that now we have set up the load balancer to provide high availability across the API servers, we point kubelet to the load balancer.

Set up some shell variables for nodes and services we will require in the following configurations:

```bash
LOADBALANCER=$(dig +short loadbalancer)
POD_CIDR=10.244.0.0/16
SERVICE_CIDR=10.96.0.0/16
CLUSTER_DNS=$(echo $SERVICE_CIDR | awk 'BEGIN {FS="."} ; { printf("%s.%s.%s.10", $1, $2, $3) }')
```
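The `awk` expression simply takes the first three octets of the service CIDR's network address and appends `.10`, which is the address the cluster DNS service will later be given. A quick check of the result (value assumes the defaults above):

```
echo $CLUSTER_DNS    # -> 10.96.0.10
```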
Set up the bootstrap kubeconfig.

```bash
{
  sudo kubectl config --kubeconfig=/var/lib/kubelet/bootstrap-kubeconfig \
    set-cluster bootstrap --server="https://${LOADBALANCER}:6443" --certificate-authority=/var/lib/kubernetes/pki/ca.crt

  sudo kubectl config --kubeconfig=/var/lib/kubelet/bootstrap-kubeconfig \
    set-credentials kubelet-bootstrap --token=07401b.f395accd246ae52d

  sudo kubectl config --kubeconfig=/var/lib/kubelet/bootstrap-kubeconfig \
    set-context bootstrap --user=kubelet-bootstrap --cluster=bootstrap

  sudo kubectl config --kubeconfig=/var/lib/kubelet/bootstrap-kubeconfig \
    use-context bootstrap
}
```
--------------- OR ---------------

```bash
cat <<EOF | sudo tee /var/lib/kubelet/bootstrap-kubeconfig
apiVersion: v1
clusters:
- cluster:
    certificate-authority: /var/lib/kubernetes/pki/ca.crt
    server: https://${LOADBALANCER}:6443
  name: bootstrap
contexts:
- context:
@@ -263,14 +310,14 @@ users:
EOF
```

Reference: https://kubernetes.io/docs/reference/access-authn-authz/kubelet-tls-bootstrapping/#kubelet-configuration
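To inspect what was written, `kubectl config view` works on any kubeconfig; a sanity check, not a lab step (`sudo` may be needed depending on how the file was created):

```
sudo kubectl config view --kubeconfig=/var/lib/kubelet/bootstrap-kubeconfig
```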
## Step 7 Create Kubelet Config File

Create the `kubelet-config.yaml` configuration file:

```bash
cat <<EOF | sudo tee /var/lib/kubelet/kubelet-config.yaml
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
authentication:
@@ -279,41 +326,41 @@ authentication:
  webhook:
    enabled: true
  x509:
    clientCAFile: /var/lib/kubernetes/pki/ca.crt
authorization:
  mode: Webhook
clusterDomain: "cluster.local"
clusterDNS:
  - ${CLUSTER_DNS}
registerNode: true
resolvConf: /run/systemd/resolve/resolv.conf
rotateCertificates: true
runtimeRequestTimeout: "15m"
serverTLSBootstrap: true
EOF
```

> Note: We are not specifying the certificate details - `tlsCertFile` and `tlsPrivateKeyFile` - in this file
## Step 8 Configure Kubelet Service

Create the `kubelet.service` systemd unit file:

```bash
cat <<EOF | sudo tee /etc/systemd/system/kubelet.service
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/kubernetes/kubernetes
After=containerd.service
Requires=containerd.service

[Service]
ExecStart=/usr/local/bin/kubelet \\
  --bootstrap-kubeconfig="/var/lib/kubelet/bootstrap-kubeconfig" \\
  --config=/var/lib/kubelet/kubelet-config.yaml \\
  --kubeconfig=/var/lib/kubelet/kubeconfig \\
  --cert-dir=/var/lib/kubelet/pki/ \\
  --container-runtime-endpoint=unix:///var/run/containerd/containerd.sock \\
  --v=2
Restart=on-failure
RestartSec=5
@@ -326,33 +373,37 @@ EOF

Things to note here:
- **bootstrap-kubeconfig**: Location of the bootstrap-kubeconfig file.
- **cert-dir**: The directory where the generated certificates are stored.
- **kubeconfig**: We specify a location for this *but we have not yet created it*. Kubelet will create one itself upon successful bootstrap.
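Once the services are started in step 10, you can watch the bootstrap happen from the kubelet's logs (a diagnostic aside, not a lab step):

```
sudo journalctl -u kubelet --no-pager | grep -i bootstrap
```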
## Step 9 Configure the Kubernetes Proxy

In one of the previous steps we created the kube-proxy.kubeconfig file. Check [here](https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/05-kubernetes-configuration-files.md) if you missed it.

```bash
{
  sudo mv kube-proxy.kubeconfig /var/lib/kube-proxy/
  sudo chown root:root /var/lib/kube-proxy/kube-proxy.kubeconfig
  sudo chmod 600 /var/lib/kube-proxy/kube-proxy.kubeconfig
}
```
Create the `kube-proxy-config.yaml` configuration file:

```bash
cat <<EOF | sudo tee /var/lib/kube-proxy/kube-proxy-config.yaml
kind: KubeProxyConfiguration
apiVersion: kubeproxy.config.k8s.io/v1alpha1
clientConnection:
  kubeconfig: /var/lib/kube-proxy/kube-proxy.kubeconfig
mode: iptables
clusterCIDR: ${POD_CIDR}
EOF
```

Create the `kube-proxy.service` systemd unit file:

```bash
cat <<EOF | sudo tee /etc/systemd/system/kube-proxy.service
[Unit]
Description=Kubernetes Kube Proxy
Documentation=https://github.com/kubernetes/kubernetes
@@ -368,11 +419,12 @@ WantedBy=multi-user.target
EOF
```
## Step 10 Start the Worker Services

On worker-2:

```bash
{
  sudo systemctl daemon-reload
  sudo systemctl enable kubelet kube-proxy
@@ -381,40 +433,60 @@ On worker-2:
```

> Remember to run the above commands on worker node: `worker-2`
### Optional - Check Certificates and kubeconfigs

At the `worker-2` node, run the following, selecting option 5:

```bash
./cert_verify.sh
```
## Step 11 Approve Server CSR

Now, go back to `master-1` and approve the pending kubelet-serving certificate.

[//]: # (host:master-1)

[//]: # (comment:Please now manually approve the certificate before proceeding)

```
kubectl get csr --kubeconfig admin.kubeconfig
```

> Output - Note the name will be different, but it will begin with `csr-`

```
NAME        AGE   SIGNERNAME                                    REQUESTOR                 REQUESTEDDURATION   CONDITION
csr-7k8nh   85s   kubernetes.io/kubelet-serving                 system:node:worker-2      <none>              Pending
csr-n7z8p   98s   kubernetes.io/kube-apiserver-client-kubelet   system:bootstrap:07401b   <none>              Approved,Issued
```

Approve the pending certificate. Note that the certificate name `csr-7k8nh` will be different for you, and it will change on each run through this lab.

```
kubectl certificate approve csr-7k8nh --kubeconfig admin.kubeconfig
```

Note: In the event your cluster persists for longer than 365 days, you will need to manually approve the replacement CSR.

Reference: https://kubernetes.io/docs/reference/access-authn-authz/kubelet-tls-bootstrapping/#kubectl-approval
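If you are scripting this step, the serving CSR's name can be pulled out with a jsonpath filter instead of copying it by hand. A sketch, assuming the only `kubernetes.io/kubelet-serving` CSR present is the pending one:

```
SERVING_CSR=$(kubectl get csr --kubeconfig admin.kubeconfig \
  -o jsonpath='{.items[?(@.spec.signerName=="kubernetes.io/kubelet-serving")].metadata.name}')
kubectl certificate approve $SERVING_CSR --kubeconfig admin.kubeconfig
```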
## Verification

List the registered Kubernetes nodes from the master node:

```bash
kubectl get nodes --kubeconfig admin.kubeconfig
```

> output

```
NAME       STATUS     ROLES    AGE   VERSION
worker-1   NotReady   <none>   93s   v1.24.3
worker-2   NotReady   <none>   93s   v1.24.3
```

Note: It is OK for the worker nodes to be in a `NotReady` state. That is because we haven't configured networking yet.

Prev: [Bootstrapping the Kubernetes Worker Nodes](10-bootstrapping-kubernetes-workers.md)<br>
Next: [Configuring Kubectl](12-configuring-kubectl.md)
@@ -1,44 +0,0 @@
# Provisioning Pod Network

We chose to use CNI - [weave](https://www.weave.works/docs/net/latest/kubernetes/kube-addon/) as our networking option.

### Install CNI plugins

Download the CNI Plugins required for weave on each of the worker nodes - `worker-1` and `worker-2`:

`wget https://github.com/containernetworking/plugins/releases/download/v0.7.5/cni-plugins-amd64-v0.7.5.tgz`

Extract it to the /opt/cni/bin directory:

`sudo tar -xzvf cni-plugins-amd64-v0.7.5.tgz --directory /opt/cni/bin/`

Reference: https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/#cni

### Deploy Weave Network

Deploy weave network. Run only once on the `master` node.

`kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')"`

Weave uses POD CIDR of `10.32.0.0/12` by default.

## Verification

List the `weave-net` pods created in the `kube-system` namespace:

```
master-1$ kubectl get pods -n kube-system
```

> output

```
NAME              READY   STATUS    RESTARTS   AGE
weave-net-58j2j   2/2     Running   0          89s
weave-net-rr5dk   2/2     Running   0          89s
```

Reference: https://kubernetes.io/docs/tasks/administer-cluster/network-policy-provider/weave-network-policy/#install-the-weave-net-addon

Next: [Kube API Server to Kubelet Connectivity](13-kube-apiserver-to-kubelet.md)
@@ -8,16 +8,25 @@ In this lab you will generate a kubeconfig file for the `kubectl` command line u

Each kubeconfig requires a Kubernetes API Server to connect to. To support high availability, the IP address assigned to the external load balancer fronting the Kubernetes API Servers will be used.

[//]: # (host:master-1)

On `master-1`

Get the kube-api server load-balancer IP:

```bash
LOADBALANCER=$(dig +short loadbalancer)
```

Generate a kubeconfig file suitable for authenticating as the `admin` user:

```bash
{
  kubectl config set-cluster kubernetes-the-hard-way \
    --certificate-authority=ca.crt \
    --embed-certs=true \
    --server=https://${LOADBALANCER}:6443

  kubectl config set-credentials admin \
    --client-certificate=admin.crt \
@@ -44,6 +53,7 @@ kubectl get componentstatuses

> output

```
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME                 STATUS    MESSAGE   ERROR
controller-manager   Healthy   ok
scheduler            Healthy   ok
@@ -53,18 +63,17 @@ etcd-0               Healthy   {"health":"true"}

List the nodes in the remote Kubernetes cluster:

```bash
kubectl get nodes
```

> output

```
NAME       STATUS     ROLES    AGE    VERSION
worker-1   NotReady   <none>   118s   v1.24.3
worker-2   NotReady   <none>   118s   v1.24.3
```

Note: It is OK for the worker nodes to be in a `NotReady` state. Worker nodes will come into `Ready` state once networking is configured.

Prev: [TLS Bootstrapping Kubernetes Workers](11-tls-bootstrapping-kubernetes-workers.md)<br>
Next: [Deploy Pod Networking](13-configure-pod-networking.md)
@@ -0,0 +1,57 @@
# Provisioning Pod Network

Container Network Interface (CNI) is a standard interface for managing IP networks between containers across many nodes.

We chose to use CNI - [weave](https://www.weave.works/docs/net/latest/kubernetes/kube-addon/) as our networking option.

### Deploy Weave Network

Deploy weave network. Run only once on the `master-1` node. You will see a warning, but this is OK.

[//]: # (host:master-1)

On `master-1`

```bash
kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')"
```

Weave uses POD CIDR of `10.32.0.0/12` by default.
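If you need Weave to allocate from a different range, its documented `IPALLOC_RANGE` setting can be appended to the manifest URL as an `env.` query parameter. A sketch using the pod CIDR from the kubelet lab:

```
kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')&env.IPALLOC_RANGE=10.244.0.0/16"
```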
## Verification

[//]: # (sleep:45)

List the `weave-net` pods created in the `kube-system` namespace:

```bash
kubectl get pods -n kube-system
```

> output

```
NAME              READY   STATUS    RESTARTS   AGE
weave-net-58j2j   2/2     Running   0          89s
weave-net-rr5dk   2/2     Running   0          89s
```

Once the Weave pods are fully running, which might take up to 60 seconds, the nodes should be ready.

```bash
kubectl get nodes
```

> Output

```
NAME       STATUS   ROLES    AGE     VERSION
worker-1   Ready    <none>   4m11s   v1.24.3
worker-2   Ready    <none>   2m49s   v1.24.3
```

Reference: https://kubernetes.io/docs/tasks/administer-cluster/network-policy-provider/weave-network-policy/#install-the-weave-net-addon

Prev: [Configuring Kubectl](12-configuring-kubectl.md)<br>
Next: [Kube API Server to Kubelet Connectivity](14-kube-apiserver-to-kubelet.md)
@@ -4,12 +4,14 @@ In this section you will configure RBAC permissions to allow the Kubernetes API

> This tutorial sets the Kubelet `--authorization-mode` flag to `Webhook`. Webhook mode uses the [SubjectAccessReview](https://kubernetes.io/docs/admin/authorization/#checking-api-access) API to determine authorization.

[//]: # (host:master-1)

Create the `system:kube-apiserver-to-kubelet` [ClusterRole](https://kubernetes.io/docs/admin/authorization/rbac/#role-and-clusterrole) with permissions to access the Kubelet API and perform most common tasks associated with managing pods:

```bash
cat <<EOF | kubectl apply --kubeconfig admin.kubeconfig -f -
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  annotations:
@@ -30,15 +32,15 @@ rules:
      - "*"
EOF
```

Reference: https://kubernetes.io/docs/reference/access-authn-authz/rbac/#role-and-clusterrole

The Kubernetes API Server authenticates to the Kubelet as the `system:kube-apiserver` user using the client certificate as defined by the `--kubelet-client-certificate` flag.

Bind the `system:kube-apiserver-to-kubelet` ClusterRole to the `system:kube-apiserver` user:

```bash
cat <<EOF | kubectl apply --kubeconfig admin.kubeconfig -f -
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: system:kube-apiserver
@@ -55,4 +57,5 @@ EOF
```

Reference: https://kubernetes.io/docs/reference/access-authn-authz/rbac/#rolebinding-and-clusterrolebinding

Prev: [Deploy Pod Networking](13-configure-pod-networking.md)<br>
Next: [DNS Addon](15-dns-addon.md)
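As an optional check that the binding works, `kubectl auth can-i` can impersonate the API server's user (the admin credentials carry impersonation rights); expected answer is `yes`:

```
kubectl auth can-i get nodes/proxy --as system:kube-apiserver --kubeconfig admin.kubeconfig
```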
@@ -4,9 +4,13 @@ In this lab you will deploy the [DNS add-on](https://kubernetes.io/docs/concepts

## The DNS Cluster Add-on

[//]: # (host:master-1)

Deploy the `coredns` cluster add-on:

Note that if you have [changed the service CIDR range](./01-prerequisites.md#service-network) and thus this file, you will need to save your copy onto `master-1` (paste to vi, then save) and apply that.

```bash
kubectl apply -f https://raw.githubusercontent.com/mmumshad/kubernetes-the-hard-way/master/deployments/coredns.yaml
```

@@ -23,7 +27,9 @@ service/kube-dns created

List the pods created by the `kube-dns` deployment:

[//]: # (sleep:15)

```bash
kubectl get pods -l k8s-app=kube-dns -n kube-system
```

@@ -39,15 +45,18 @@ Reference: https://kubernetes.io/docs/tasks/administer-cluster/coredns/#installi

## Verification

Create a `busybox` pod:

```bash
kubectl run busybox --image=busybox:1.28 --command -- sleep 3600
```

[//]: # (sleep:10)

List the `busybox` pod:

```bash
kubectl get pods -l run=busybox
```

@@ -60,7 +69,7 @@ busybox-bd8fb7cbd-vflm9   1/1   Running   0   10s

Execute a DNS lookup for the `kubernetes` service inside the `busybox` pod:

```bash
kubectl exec -ti busybox -- nslookup kubernetes
```

@@ -74,4 +83,5 @@ Name: kubernetes
Address 1: 10.96.0.1 kubernetes.default.svc.cluster.local
```
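You can take the check one step further and resolve a service in another namespace by its fully qualified name:

```
kubectl exec -ti busybox -- nslookup kube-dns.kube-system.svc.cluster.local
```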
Prev: [Kube API Server to Kubelet Connectivity](14-kube-apiserver-to-kubelet.md)<br>
Next: [Smoke Test](16-smoke-test.md)
@@ -1,41 +0,0 @@
# Run End-to-End Tests

Install Go

```
wget https://dl.google.com/go/go1.15.linux-amd64.tar.gz

sudo tar -C /usr/local -xzf go1.15.linux-amd64.tar.gz
export GOPATH="/home/vagrant/go"
export PATH=$PATH:/usr/local/go/bin:$GOPATH/bin
```

## Install kubetest

```
git clone https://github.com/kubernetes/test-infra.git
cd test-infra/
GO111MODULE=on go install ./kubetest
```

> Note: This may take a few minutes depending on your network speed

## Use the version specific to your cluster

```
K8S_VERSION=$(kubectl version -o json | jq -r '.serverVersion.gitVersion')
export KUBERNETES_CONFORMANCE_TEST=y
export KUBECONFIG="$HOME/.kube/config"

kubetest --provider=skeleton --test --test_args="--ginkgo.focus=\[Conformance\]" --extract ${K8S_VERSION} | tee test.out
```

This could take about 1.5 to 2 hours. The number of tests run and passed will be displayed at the end.

Next: [Dynamic Kubelet configuration](17-extra-dynamic-kubelet-configuration.md)
@@ -4,18 +4,20 @@ In this lab you will complete a series of tasks to ensure your Kubernetes cluste

## Data Encryption

[//]: # (host:master-1)

In this section you will verify the ability to [encrypt secret data at rest](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/#verifying-that-data-is-encrypted).

Create a generic secret:

```bash
kubectl create secret generic kubernetes-the-hard-way \
  --from-literal="mykey=mydata"
```

Print a hexdump of the `kubernetes-the-hard-way` secret stored in etcd:

```bash
sudo ETCDCTL_API=3 etcdctl get \
  --endpoints=https://127.0.0.1:2379 \
  --cacert=/etc/etcd/ca.crt \
@@ -48,7 +50,9 @@ sudo ETCDCTL_API=3 etcdctl get \

The etcd key should be prefixed with `k8s:enc:aescbc:v1:key1`, which indicates the `aescbc` provider was used to encrypt the data with the `key1` encryption key.

Cleanup:

```bash
kubectl delete secret kubernetes-the-hard-way
```

## Deployments

@@ -56,13 +60,15 @@ In this section you will verify the ability to create and manage [Deployments](h

Create a deployment for the [nginx](https://nginx.org/en/) web server:

```bash
kubectl create deployment nginx --image=nginx:1.23.1
```

[//]: # (sleep:15)

List the pod created by the `nginx` deployment:

```bash
kubectl get pods -l app=nginx
```

@@ -79,18 +85,18 @@ In this section you will verify the ability to access applications remotely usin

Create a service to expose deployment nginx on node ports:

```bash
kubectl expose deploy nginx --type=NodePort --port 80
```

```bash
PORT_NUMBER=$(kubectl get svc -l app=nginx -o jsonpath="{.items[0].spec.ports[0].nodePort}")
```
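The `jsonpath` query digs the randomly assigned node port out of the service object. To see what was captured (the actual number will vary):

```
echo $PORT_NUMBER    # a NodePort in the 30000-32767 range, e.g. 31234
```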
Test to view the NGINX page:

```bash
curl http://worker-1:$PORT_NUMBER
curl http://worker-2:$PORT_NUMBER
```

@@ -112,13 +118,13 @@ In this section you will verify the ability to [retrieve container logs](https:/

Retrieve the full name of the `nginx` pod:

```bash
POD_NAME=$(kubectl get pods -l app=nginx -o jsonpath="{.items[0].metadata.name}")
```

Print the `nginx` pod logs:

```bash
kubectl logs $POD_NAME
```

@@ -135,14 +141,15 @@ In this section you will verify the ability to [execute commands in a container]

Print the nginx version by executing the `nginx -v` command in the `nginx` container:

```bash
kubectl exec -ti $POD_NAME -- nginx -v
```

> output

```
nginx version: nginx/1.23.1
```

Prev: [DNS Addon](15-dns-addon.md)<br>
Next: [End to End Tests](17-e2e-tests.md)
@@ -0,0 +1,40 @@
# Run End-to-End Tests

## Install Go

```bash
wget https://dl.google.com/go/go1.18.linux-amd64.tar.gz

sudo tar -C /usr/local -xzf go1.18.linux-amd64.tar.gz
```

## Install kubetest

```bash
git clone --depth 1 https://github.com/kubernetes/test-infra.git
cd test-infra/kubetest
export GOPATH="$HOME/go"
export PATH=$PATH:/usr/local/go/bin:$GOPATH/bin
go build
```

> Note: it will take a while to build as it has many dependencies.

## Use the version specific to your cluster

```bash
sudo apt install jq -y
```

```bash
K8S_VERSION=$(kubectl version -o json | jq -r '.serverVersion.gitVersion')
export KUBERNETES_CONFORMANCE_TEST=y
export KUBECONFIG="$HOME/.kube/config"

./kubetest --provider=skeleton --test --test_args="--ginkgo.focus=\[Conformance\]" --extract ${K8S_VERSION} | tee test.out
```
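If you don't want to tie the machine up for the full conformance run, the `--ginkgo.focus` argument accepts any regular expression, so you can point it at a smaller slice of the suite. The focus pattern below is only an illustrative example:

```
./kubetest --provider=skeleton --test --test_args="--ginkgo.focus=\[sig-network\].*DNS" --extract ${K8S_VERSION} | tee test.out
```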
This could take *18 hours or more*! There are several thousand tests in the suite. The number of tests run and passed will be displayed at the end. Expect some failures as it tries tests that aren't supported by our cluster, e.g. mounting persistent volumes using NFS.

Prev: [Smoke Test](16-smoke-test.md)
@@ -1,58 +0,0 @@
# Dynamic Kubelet Configuration

`sudo apt install -y jq`

```
NODE_NAME="worker-1"; curl -sSL "https://localhost:6443/api/v1/nodes/${NODE_NAME}/proxy/configz" -k --cert admin.crt --key admin.key | jq '.kubeletconfig|.kind="KubeletConfiguration"|.apiVersion="kubelet.config.k8s.io/v1beta1"' > kubelet_configz_${NODE_NAME}
```

```
kubectl -n kube-system create configmap nodes-config --from-file=kubelet=kubelet_configz_${NODE_NAME} --append-hash -o yaml
```

Edit `worker-1` node to use the dynamically created configuration:

```
master-1# kubectl edit node worker-1
```

Add the following YAML bit under `spec`:

```
configSource:
  configMap:
    name: CONFIG_MAP_NAME # replace CONFIG_MAP_NAME with the name of the ConfigMap
    namespace: kube-system
    kubeletConfigKey: kubelet
```

Configure the kubelet service.

Create the `kubelet.service` systemd unit file:

```
cat <<EOF | sudo tee /etc/systemd/system/kubelet.service
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/kubernetes/kubernetes
After=docker.service
Requires=docker.service

[Service]
ExecStart=/usr/local/bin/kubelet \\
  --bootstrap-kubeconfig="/var/lib/kubelet/bootstrap-kubeconfig" \\
  --image-pull-progress-deadline=2m \\
  --kubeconfig=/var/lib/kubelet/kubeconfig \\
  --dynamic-config-dir=/var/lib/kubelet/dynamic-config \\
  --cert-dir=/var/lib/kubelet/ \\
  --network-plugin=cni \\
  --register-node=true \\
  --v=2
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF
```

Reference: https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/
@@ -0,0 +1,114 @@
#!/usr/bin/env python3
#
# Generate a set of scripts from the documentation by reading markdown comments to determine target hosts
# and extracting all scripts fenced by ```bash
#
# Point it to the docs directory containing the lab documents.
# It will create shell scripts in docs/../quick-steps
# The scripts are ordered by lab number, then a, b, c where a lab requires you to use more than one host,
# and the filename indicates which host to run them on.
#
# Hosts and other things are determined from markdown comments.
# For documents containing no markdown comments, scripts are not generated.

import re
import os
import glob
import codecs
import argparse
from enum import Enum
from sys import exit


class State(Enum):
    NONE = 0
    SCRIPT = 1


parser = argparse.ArgumentParser(description="Extract scripts from markdown")
parser.add_argument("--path", '-p', required=True, help='Path to markdown docs')
args = parser.parse_args()

docs_path = os.path.abspath(args.path)

if not os.path.isdir(docs_path):
    print(f'Invalid path: {docs_path}')
    exit(1)

qs_path = os.path.abspath(os.path.join(docs_path, '../quick-steps'))

if not os.path.isdir(qs_path):
    os.makedirs(qs_path)

newline = chr(10)  # In case running on Windows (plus writing files as binary to not convert to \r\n)
file_number_rx = re.compile(r'^(?P<number>\d+)')
comment_rx = re.compile(r'^\[//\]:\s\#\s\((?P<token>\w+):(?P<value>[^\)]+)\)')
choice_rx = re.compile(r'^\s*-+\s+OR\s+-+')
script_begin = '```bash'
script_end = '```'
script_open = ('{' + newline).encode('utf-8')
script_close = '}'.encode('utf-8')
current_host = None


def write_script(filename: str, script: list):
    # filename is already an absolute path here, so the join is a no-op
    path = os.path.join(qs_path, filename)
    with open(path, "wb") as f:
        f.write(script_open)
        f.write(newline.join(script).encode('utf-8'))
        f.write(script_close)
    print(f'-> {path}')


for doc in glob.glob(os.path.join(docs_path, '*.md')):
    print(doc)
    script = []
    state = State.NONE
    ignore_next_script = False
    m = file_number_rx.search(os.path.basename(doc))
    if not m:
        continue
    file_no = m['number']
    section = 0
    output_file = None
    with codecs.open(doc, "r", encoding='utf-8') as f:
        for line in f.readlines():
            line = line.rstrip()
            if state == State.NONE:
                m = comment_rx.search(line)
                if m:
                    token = m['token']
                    value = m['value']
                    if token == 'host':
                        # Starting a new host section - flush what we have so far
                        if script:
                            write_script(output_file, script)
                            script = []
                        output_file = os.path.join(qs_path, f'{file_no}{chr(97 + section)}-{value}.sh')
                        section += 1
                    elif token == 'sleep':
                        script.extend([
                            f'echo "Sleeping {value}s"',
                            f'sleep {value}',
                            newline
                        ])
                    elif token == 'comment':
                        script.extend([
                            '#######################################################################',
                            '#',
                            f'# {value}',
                            '#',
                            '#######################################################################',
                            newline
                        ])
                elif line == script_begin:
                    state = State.SCRIPT
                elif choice_rx.match(line):
                    ignore_next_script = True
            elif state == State.SCRIPT:
                if line == script_end:
                    state = State.NONE
                    script.append(newline)
                    ignore_next_script = False
                elif not (ignore_next_script or line == '{' or line == '}'):
                    script.append(line)
    if output_file and script:
        write_script(output_file, script)
@@ -0,0 +1,20 @@
# Vagrant

This directory contains the configuration for the virtual machines we will use for the installation.

A few prerequisites are handled by the VM provisioning steps.

## Kernel Settings

1. Disable cgroups v2. I found that Kubernetes currently doesn't play nicely with cgroups v2, therefore we need to set a kernel boot parameter in grub to switch back to v1.
1. Install the `br_netfilter` kernel module that permits kube-proxy to manipulate IP tables rules.
1. Add the two tunables `net.bridge.bridge-nf-call-iptables=1` and `net.ipv4.ip_forward=1`, also required for successful pod networking (a manual sketch of these settings follows below).
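For reference, this is roughly what the provisioning does for the module and tunables, if you were applying them by hand:

```
sudo modprobe br_netfilter
sudo sysctl -w net.bridge.bridge-nf-call-iptables=1
sudo sysctl -w net.ipv4.ip_forward=1
```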
## DNS settings

1. Set the default DNS server to be Google, as we know this always works.
1. Set up `/etc/hosts` so that all the VMs can resolve each other.

## Other settings

1. Install configs for `vim` and `tmux` on master-1.
@ -6,11 +6,37 @@
|
||||||
NUM_MASTER_NODE = 2
|
NUM_MASTER_NODE = 2
|
||||||
NUM_WORKER_NODE = 2
|
NUM_WORKER_NODE = 2
|
||||||
|
|
||||||
IP_NW = "192.168.5."
|
IP_NW = "192.168.56."
|
||||||
MASTER_IP_START = 10
|
MASTER_IP_START = 10
|
||||||
NODE_IP_START = 20
|
NODE_IP_START = 20
|
||||||
LB_IP_START = 30
|
LB_IP_START = 30
|
||||||
|
|
||||||
|
# Sets up hosts file and DNS
|
||||||
|
def setup_dns(node)
|
||||||
|
# Set up /etc/hosts
|
||||||
|
node.vm.provision "setup-hosts", :type => "shell", :path => "ubuntu/vagrant/setup-hosts.sh" do |s|
|
||||||
|
s.args = ["enp0s8", node.vm.hostname]
|
||||||
|
end
|
||||||
|
# Set up DNS resolution
|
||||||
|
node.vm.provision "setup-dns", type: "shell", :path => "ubuntu/update-dns.sh"
|
||||||
|
end
|
||||||
|
|
||||||
|
# Runs provisioning steps that are required by masters and workers
|
||||||
|
def provision_kubernetes_node(node)
|
||||||
|
# Set up kernel parameters, modules and tunables
|
||||||
|
node.vm.provision "setup-kernel", :type => "shell", :path => "ubuntu/setup-kernel.sh"
|
||||||
|
# Restart
|
||||||
|
node.vm.provision :shell do |shell|
|
||||||
|
shell.privileged = true
|
||||||
|
shell.inline = "echo Rebooting"
|
||||||
|
shell.reboot = true
|
||||||
|
end
|
||||||
|
# Set up DNS
|
||||||
|
setup_dns node
|
||||||
|
# Install cert verification script
|
||||||
|
node.vm.provision "shell", inline: "ln -s /vagrant/ubuntu/cert_verify.sh /home/vagrant/cert_verify.sh"
|
||||||
|
end
|
||||||
|
|
||||||
# All Vagrant configuration is done below. The "2" in Vagrant.configure
|
# All Vagrant configuration is done below. The "2" in Vagrant.configure
|
||||||
# configures the configuration version (we support older styles for
|
# configures the configuration version (we support older styles for
|
||||||
# backwards compatibility). Please don't change it unless you know what
|
# backwards compatibility). Please don't change it unless you know what
|
||||||
|
@ -23,99 +49,63 @@ Vagrant.configure("2") do |config|
|
||||||
# Every Vagrant development environment requires a box. You can search for
|
# Every Vagrant development environment requires a box. You can search for
|
||||||
# boxes at https://vagrantcloud.com/search.
|
# boxes at https://vagrantcloud.com/search.
|
||||||
# config.vm.box = "base"
|
# config.vm.box = "base"
|
||||||
config.vm.box = "ubuntu/bionic64"
|
config.vm.box = "ubuntu/jammy64"
|
||||||
|
|
||||||
# Disable automatic box update checking. If you disable this, then
|
# Disable automatic box update checking. If you disable this, then
|
||||||
# boxes will only be checked for updates when the user runs
|
# boxes will only be checked for updates when the user runs
|
||||||
# `vagrant box outdated`. This is not recommended.
|
# `vagrant box outdated`. This is not recommended.
|
||||||
config.vm.box_check_update = false
|
config.vm.box_check_update = false
|
||||||
|
|
||||||
# Create a public network, which generally matched to bridged network.
|
|
||||||
# Bridged networks make the machine appear as another physical device on
|
|
||||||
# your network.
|
|
||||||
# config.vm.network "public_network"
|
|
||||||
|
|
||||||
# Share an additional folder to the guest VM. The first argument is
|
|
||||||
# the path on the host to the actual folder. The second argument is
|
|
||||||
# the path on the guest to mount the folder. And the optional third
|
|
||||||
# argument is a set of non-required options.
|
|
||||||
# config.vm.synced_folder "../data", "/vagrant_data"
|
|
||||||
|
|
||||||
# Provider-specific configuration so you can fine-tune various
|
|
||||||
# backing providers for Vagrant. These expose provider-specific options.
|
|
||||||
# Example for VirtualBox:
|
|
||||||
#
|
|
||||||
# config.vm.provider "virtualbox" do |vb|
|
|
||||||
# # Customize the amount of memory on the VM:
|
|
||||||
# vb.memory = "1024"
|
|
||||||
# end
|
|
||||||
#
|
|
||||||
# View the documentation for the provider you are using for more
|
|
||||||
# information on available options.
|
|
||||||
|
|
   # Provision Master Nodes
   (1..NUM_MASTER_NODE).each do |i|
     config.vm.define "master-#{i}" do |node|
       # Name shown in the GUI
       node.vm.provider "virtualbox" do |vb|
         vb.name = "kubernetes-ha-master-#{i}"
-        vb.memory = 2048
-        vb.cpus = 2
+        if i == 1
+          vb.memory = 2048 # More needed to run e2e tests at end
+        else
+          vb.memory = 1024
+        end
+        vb.cpus = 2
       end
-      node.vm.hostname = "master-#{i}"
-      node.vm.network :private_network, ip: IP_NW + "#{MASTER_IP_START + i}"
-      node.vm.network "forwarded_port", guest: 22, host: "#{2710 + i}"
-      node.vm.provision "setup-hosts", :type => "shell", :path => "ubuntu/vagrant/setup-hosts.sh" do |s|
-        s.args = ["enp0s8"]
-      end
-      node.vm.provision "setup-dns", type: "shell", :path => "ubuntu/update-dns.sh"
-      node.vm.provision "file", source: "./ubuntu/cert_verify.sh", destination: "$HOME/"
+      node.vm.hostname = "master-#{i}"
+      node.vm.network :private_network, ip: IP_NW + "#{MASTER_IP_START + i}"
+      node.vm.network "forwarded_port", guest: 22, host: "#{2710 + i}"
+      provision_kubernetes_node node
+      if i == 1
+        # Install (opinionated) configs for vim and tmux on master-1. These are used by the author for the CKA exam.
+        node.vm.provision "file", source: "./ubuntu/tmux.conf", destination: "$HOME/.tmux.conf"
+        node.vm.provision "file", source: "./ubuntu/vimrc", destination: "$HOME/.vimrc"
+      end
     end
   end
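The sizing above gives master-1 2 GB (it later runs the e2e tests) and master-2 1 GB, with SSH forwarded to 2710+i. A quick host-side smoke test, assuming the VM names defined in this Vagrantfile:

```bash
vagrant up master-1 master-2
vagrant ssh master-1            # equivalent to: ssh vagrant@127.0.0.1 -p 2711
# provision_kubernetes_node symlinks the verification script into the home directory
vagrant ssh master-1 -c "ls -l /home/vagrant/cert_verify.sh"
```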
   # Provision Load Balancer Node
   config.vm.define "loadbalancer" do |node|
     node.vm.provider "virtualbox" do |vb|
       vb.name = "kubernetes-ha-lb"
       vb.memory = 512
       vb.cpus = 1
     end
     node.vm.hostname = "loadbalancer"
     node.vm.network :private_network, ip: IP_NW + "#{LB_IP_START}"
     node.vm.network "forwarded_port", guest: 22, host: 2730
-    node.vm.provision "setup-hosts", :type => "shell", :path => "ubuntu/vagrant/setup-hosts.sh" do |s|
-      s.args = ["enp0s8"]
-    end
-    node.vm.provision "setup-dns", type: "shell", :path => "ubuntu/update-dns.sh"
+    setup_dns node
   end
   # Provision Worker Nodes
   (1..NUM_WORKER_NODE).each do |i|
     config.vm.define "worker-#{i}" do |node|
       node.vm.provider "virtualbox" do |vb|
         vb.name = "kubernetes-ha-worker-#{i}"
-        vb.memory = 512
+        vb.memory = 1024
         vb.cpus = 1
       end
       node.vm.hostname = "worker-#{i}"
       node.vm.network :private_network, ip: IP_NW + "#{NODE_IP_START + i}"
       node.vm.network "forwarded_port", guest: 22, host: "#{2720 + i}"
-      node.vm.provision "setup-hosts", :type => "shell", :path => "ubuntu/vagrant/setup-hosts.sh" do |s|
-        s.args = ["enp0s8"]
-      end
-      node.vm.provision "setup-dns", type: "shell", :path => "ubuntu/update-dns.sh"
-      node.vm.provision "install-docker", type: "shell", :path => "ubuntu/install-docker-2.sh"
-      node.vm.provision "allow-bridge-nf-traffic", :type => "shell", :path => "ubuntu/allow-bridge-nf-traffic.sh"
-      node.vm.provision "file", source: "./ubuntu/cert_verify.sh", destination: "$HOME/"
+      provision_kubernetes_node node
     end
   end
 end
@@ -1,2 +0,0 @@
-#!/bin/bash
-sysctl net.bridge.bridge-nf-call-iptables=1
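This hunk deletes the standalone allow-bridge-nf-traffic.sh that the old worker provisioner above invoked; per the commit notes, the required kernel parameters are now applied up front by ubuntu/setup-kernel.sh. A quick check inside any node (a sketch, assuming setup-kernel.sh has already run):

```bash
# expect: net.bridge.bridge-nf-call-iptables = 1
sysctl net.bridge.bridge-nf-call-iptables
```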
@@ -4,19 +4,23 @@ set -e

 # Green & Red marking for Success and Failed messages
 SUCCESS='\033[0;32m'
-FAILED='\033[0;31m'
+FAILED='\033[0;31;1m'
 NC='\033[0m'

-# All Cert Location
+# IP addresses
+INTERNAL_IP=$(ip addr show enp0s8 | grep "inet " | awk '{print $2}' | cut -d / -f 1)
+MASTER_1=$(dig +short master-1)
+MASTER_2=$(dig +short master-2)
+WORKER_1=$(dig +short worker-1)
+WORKER_2=$(dig +short worker-2)
+LOADBALANCER=$(dig +short loadbalancer)
+LOCALHOST="127.0.0.1"
+
+# All Cert Location
 # ca certificate location
 CACERT=ca.crt
 CAKEY=ca.key

-# admin certificate location
-ADMINCERT=admin.crt
-ADMINKEY=admin.key
-
 # Kube controller manager certificate location
 KCMCERT=kube-controller-manager.crt
 KCMKEY=kube-controller-manager.key
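The script now resolves node IPs at runtime with dig instead of hard-coding them. A minimal sanity check of that resolution, assuming the host entries written by setup-hosts.sh are in place:

```bash
for h in master-1 master-2 worker-1 worker-2 loadbalancer; do
    printf '%-14s %s\n' "$h" "$(dig +short "$h")"
done
```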
@@ -91,294 +95,70 @@ SYSTEMD_WORKER_1_KP=/etc/systemd/system/kube-proxy.service

 # Function - Master node #

-check_cert_ca()
+check_cert_and_key()
 {
-    if [ -z $CACERT ] && [ -z $CAKEY ]
+    local name=$1
+    local subject=$2
+    local issuer=$3
+    local nokey=
+    local cert="${CERT_LOCATION}/$1.crt"
+    local key="${CERT_LOCATION}/$1.key"
+
+    if [ -z $cert -o -z $key ]
     then
-        printf "${FAILED}please specify cert and key location\n"
+        printf "${FAILED}cert and/or key not present in ${CERT_LOCATION}. Perhaps you missed a copy step\n${NC}"
         exit 1
-    elif [ -f $CACERT ] && [ -f $CAKEY ]
+    elif [ -f $cert -a -f $key ]
     then
-        printf "${NC}CA cert and key found, verifying the authenticity\n"
-        CACERT_SUBJECT=$(openssl x509 -in $CACERT -text | grep "Subject: CN"| tr -d " ")
-        CACERT_ISSUER=$(openssl x509 -in $CACERT -text | grep "Issuer: CN"| tr -d " ")
-        CACERT_MD5=$(openssl x509 -noout -modulus -in $CACERT | openssl md5| awk '{print $2}')
-        CAKEY_MD5=$(openssl rsa -noout -modulus -in $CAKEY | openssl md5| awk '{print $2}')
-        if [ $CACERT_SUBJECT == "Subject:CN=KUBERNETES-CA" ] && [ $CACERT_ISSUER == "Issuer:CN=KUBERNETES-CA" ] && [ $CACERT_MD5 == $CAKEY_MD5 ]
+        printf "${NC}${name} cert and key found, verifying the authenticity\n"
+        CERT_SUBJECT=$(sudo openssl x509 -in $cert -text | grep "Subject: CN"| tr -d " ")
+        CERT_ISSUER=$(sudo openssl x509 -in $cert -text | grep "Issuer: CN"| tr -d " ")
+        CERT_MD5=$(sudo openssl x509 -noout -modulus -in $cert | openssl md5| awk '{print $2}')
+        KEY_MD5=$(sudo openssl rsa -noout -modulus -in $key | openssl md5| awk '{print $2}')
+        if [ $CERT_SUBJECT == "${subject}" ] && [ $CERT_ISSUER == "${issuer}" ] && [ $CERT_MD5 == $KEY_MD5 ]
         then
-            printf "${SUCCESS}CA cert and key are correct\n"
+            printf "${SUCCESS}${name} cert and key are correct\n${NC}"
         else
-            printf "${FAILED}Exiting...Found mismtach in the CA certificate and keys, More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/04-certificate-authority.md#certificate-authority\n"
+            printf "${FAILED}Exiting...Found mismatch in the ${name} certificate and keys. More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/04-certificate-authority.md#certificate-authority\n${NC}"
             exit 1
         fi
     else
-        printf "${FAILED}ca.crt / ca.key is missing. More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/04-certificate-authority.md#certificate-authority\n"
+        printf "${FAILED}${cert} / ${key} is missing. More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/04-certificate-authority.md#certificate-authority\n"
+        echo "These should be in /var/lib/kubernetes/pki (most certs), /etc/etcd (etcd server certs) or /var/lib/kubelet (kubelet certs)${NC}"
         exit 1
     fi
 }
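check_cert_and_key hinges on one idea: a certificate and its private key match when their RSA moduli hash to the same value. The core check standalone, plus a hypothetical invocation using the subject/issuer strings defined at the end of this script (CERT_LOCATION is set by the caller):

```bash
# the two MD5 digests must be identical for a matching pair
sudo openssl x509 -noout -modulus -in /var/lib/kubernetes/pki/ca.crt | openssl md5
sudo openssl rsa  -noout -modulus -in /var/lib/kubernetes/pki/ca.key | openssl md5

# hypothetical call
CERT_LOCATION=/var/lib/kubernetes/pki
check_cert_and_key "ca" "$SUBJ_CA" "$CERT_ISSUER"
```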
-check_cert_admin()
+check_cert_only()
 {
-    if [ -z $ADMINCERT ] && [ -z $ADMINKEY ]
+    local name=$1
+    local subject=$2
+    local issuer=$3
+    local cert="${CERT_LOCATION}/$1.crt"
+
+    # Worker-2 auto cert is a .pem
+    [ -f "${CERT_LOCATION}/$1.pem" ] && cert="${CERT_LOCATION}/$1.pem"
+
+    if [ -z $cert ]
     then
-        printf "${FAILED}please specify cert and key location\n"
+        printf "${FAILED}cert not present in ${CERT_LOCATION}. Perhaps you missed a copy step\n${NC}"
         exit 1
-    elif [ -f $ADMINCERT ] && [ -f $ADMINKEY ]
+    elif [ -f $cert ]
     then
-        printf "${NC}admin cert and key found, verifying the authenticity\n"
-        ADMINCERT_SUBJECT=$(openssl x509 -in $ADMINCERT -text | grep "Subject: CN"| tr -d " ")
-        ADMINCERT_ISSUER=$(openssl x509 -in $ADMINCERT -text | grep "Issuer: CN"| tr -d " ")
-        ADMINCERT_MD5=$(openssl x509 -noout -modulus -in $ADMINCERT | openssl md5| awk '{print $2}')
-        ADMINKEY_MD5=$(openssl rsa -noout -modulus -in $ADMINKEY | openssl md5| awk '{print $2}')
-        if [ $ADMINCERT_SUBJECT == "Subject:CN=admin,O=system:masters" ] && [ $ADMINCERT_ISSUER == "Issuer:CN=KUBERNETES-CA" ] && [ $ADMINCERT_MD5 == $ADMINKEY_MD5 ]
+        printf "${NC}${name} cert found, verifying the authenticity\n"
+        CERT_SUBJECT=$(sudo openssl x509 -in $cert -text | grep "Subject: "| tr -d " ")
+        CERT_ISSUER=$(sudo openssl x509 -in $cert -text | grep "Issuer: CN"| tr -d " ")
+        CERT_MD5=$(sudo openssl x509 -noout -modulus -in $cert | openssl md5| awk '{print $2}')
+        if [ $CERT_SUBJECT == "${subject}" ] && [ $CERT_ISSUER == "${issuer}" ]
         then
-            printf "${SUCCESS}admin cert and key are correct\n"
+            printf "${SUCCESS}${name} cert is correct\n${NC}"
         else
-            printf "${FAILED}Exiting...Found mismtach in the admin certificate and keys, More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/04-certificate-authority.md#the-admin-client-certificate\n"
+            printf "${FAILED}Exiting...Found mismatch in the ${name} certificate. More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/04-certificate-authority.md#certificate-authority\n${NC}"
             exit 1
         fi
     else
-        printf "${FAILED}admin.crt / admin.key is missing. More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/04-certificate-authority.md#the-admin-client-certificate\n"
-        exit 1
-    fi
-}
+        printf "${FAILED}${cert} missing. More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/04-certificate-authority.md#certificate-authority\n${NC}"
+        echo "These should be in ${CERT_LOCATION}${NC}"
+        exit 1
+    fi
+}
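A sketch of a cert-only call. The .pem special case exists because worker-2's TLS-bootstrapped kubelet certificate lands as a .pem rather than a .crt; the name and subject string below are illustrative, not taken from the diff:

```bash
# hypothetical: validate a kubelet serving cert without access to its key
CERT_LOCATION=/var/lib/kubelet/pki
check_cert_only "kubelet-client-current" \
    "Subject:O=system:nodes,CN=system:node:worker-2" "$CERT_ISSUER"
```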
-check_cert_kcm()
-{
-    if [ -z $KCMCERT ] && [ -z $KCMKEY ]
-    then
-        printf "${FAILED}please specify cert and key location\n"
-        exit 1
-    elif [ -f $KCMCERT ] && [ -f $KCMKEY ]
-    then
-        printf "${NC}kube-controller-manager cert and key found, verifying the authenticity\n"
-        KCMCERT_SUBJECT=$(openssl x509 -in $KCMCERT -text | grep "Subject: CN"| tr -d " ")
-        KCMCERT_ISSUER=$(openssl x509 -in $KCMCERT -text | grep "Issuer: CN"| tr -d " ")
-        KCMCERT_MD5=$(openssl x509 -noout -modulus -in $KCMCERT | openssl md5| awk '{print $2}')
-        KCMKEY_MD5=$(openssl rsa -noout -modulus -in $KCMKEY | openssl md5| awk '{print $2}')
-        if [ $KCMCERT_SUBJECT == "Subject:CN=system:kube-controller-manager" ] && [ $KCMCERT_ISSUER == "Issuer:CN=KUBERNETES-CA" ] && [ $KCMCERT_MD5 == $KCMKEY_MD5 ]
-        then
-            printf "${SUCCESS}kube-controller-manager cert and key are correct\n"
-        else
-            printf "${FAILED}Exiting...Found mismtach in the kube-controller-manager certificate and keys, More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/04-certificate-authority.md#the-controller-manager-client-certificate\n"
-            exit 1
-        fi
-    else
-        printf "${FAILED}kube-controller-manager.crt / kube-controller-manager.key is missing. More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/04-certificate-authority.md#the-controller-manager-client-certificate\n"
-        exit 1
-    fi
-}
-
-check_cert_kp()
-{
-    if [ -z $KPCERT ] && [ -z $KPKEY ]
-    then
-        printf "${FAILED}please specify cert and key location\n"
-        exit 1
-    elif [ -f $KPCERT ] && [ -f $KPKEY ]
-    then
-        printf "${NC}kube-proxy cert and key found, verifying the authenticity\n"
-        KPCERT_SUBJECT=$(openssl x509 -in $KPCERT -text | grep "Subject: CN"| tr -d " ")
-        KPCERT_ISSUER=$(openssl x509 -in $KPCERT -text | grep "Issuer: CN"| tr -d " ")
-        KPCERT_MD5=$(openssl x509 -noout -modulus -in $KPCERT | openssl md5| awk '{print $2}')
-        KPKEY_MD5=$(openssl rsa -noout -modulus -in $KPKEY | openssl md5| awk '{print $2}')
-        if [ $KPCERT_SUBJECT == "Subject:CN=system:kube-proxy" ] && [ $KPCERT_ISSUER == "Issuer:CN=KUBERNETES-CA" ] && [ $KPCERT_MD5 == $KPKEY_MD5 ]
-        then
-            printf "${SUCCESS}kube-proxy cert and key are correct\n"
-        else
-            printf "${FAILED}Exiting...Found mismtach in the kube-proxy certificate and keys, More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/04-certificate-authority.md#the-kube-proxy-client-certificate\n"
-            exit 1
-        fi
-    else
-        printf "${FAILED}kube-proxy.crt / kube-proxy.key is missing. More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/04-certificate-authority.md#the-kube-proxy-client-certificate\n"
-        exit 1
-    fi
-}
-
-check_cert_ks()
-{
-    if [ -z $KSCERT ] && [ -z $KSKEY ]
-    then
-        printf "${FAILED}please specify cert and key location\n"
-        exit 1
-    elif [ -f $KSCERT ] && [ -f $KSKEY ]
-    then
-        printf "${NC}kube-scheduler cert and key found, verifying the authenticity\n"
-        KSCERT_SUBJECT=$(openssl x509 -in $KSCERT -text | grep "Subject: CN"| tr -d " ")
-        KSCERT_ISSUER=$(openssl x509 -in $KSCERT -text | grep "Issuer: CN"| tr -d " ")
-        KSCERT_MD5=$(openssl x509 -noout -modulus -in $KSCERT | openssl md5| awk '{print $2}')
-        KSKEY_MD5=$(openssl rsa -noout -modulus -in $KSKEY | openssl md5| awk '{print $2}')
-        if [ $KSCERT_SUBJECT == "Subject:CN=system:kube-scheduler" ] && [ $KSCERT_ISSUER == "Issuer:CN=KUBERNETES-CA" ] && [ $KSCERT_MD5 == $KSKEY_MD5 ]
-        then
-            printf "${SUCCESS}kube-scheduler cert and key are correct\n"
-        else
-            printf "${FAILED}Exiting...Found mismtach in the kube-scheduler certificate and keys, More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/04-certificate-authority.md#the-scheduler-client-certificate\n"
-            exit 1
-        fi
-    else
-        printf "${FAILED}kube-scheduler.crt / kube-scheduler.key is missing. More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/04-certificate-authority.md#the-scheduler-client-certificate\n"
-        exit 1
-    fi
-}
-
-check_cert_api()
-{
-    if [ -z $APICERT ] && [ -z $APIKEY ]
-    then
-        printf "${FAILED}please specify kube-api cert and key location, Exiting....\n"
-        exit 1
-    elif [ -f $APICERT ] && [ -f $APIKEY ]
-    then
-        printf "${NC}kube-apiserver cert and key found, verifying the authenticity\n"
-        APICERT_SUBJECT=$(openssl x509 -in $APICERT -text | grep "Subject: CN"| tr -d " ")
-        APICERT_ISSUER=$(openssl x509 -in $APICERT -text | grep "Issuer: CN"| tr -d " ")
-        APICERT_MD5=$(openssl x509 -noout -modulus -in $APICERT | openssl md5| awk '{print $2}')
-        APIKEY_MD5=$(openssl rsa -noout -modulus -in $APIKEY | openssl md5| awk '{print $2}')
-        if [ $APICERT_SUBJECT == "Subject:CN=kube-apiserver" ] && [ $APICERT_ISSUER == "Issuer:CN=KUBERNETES-CA" ] && [ $APICERT_MD5 == $APIKEY_MD5 ]
-        then
-            printf "${SUCCESS}kube-apiserver cert and key are correct\n"
-        else
-            printf "${FAILED}Exiting...Found mismtach in the kube-apiserver certificate and keys, More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/04-certificate-authority.md#the-kubernetes-api-server-certificate\n"
-            exit 1
-        fi
-    else
-        printf "${FAILED}kube-apiserver.crt / kube-apiserver.key is missing. More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/04-certificate-authority.md#the-kubernetes-api-server-certificate\n"
-        exit 1
-    fi
-}
-
-check_cert_etcd()
-{
-    if [ -z $ETCDCERT ] && [ -z $ETCDKEY ]
-    then
-        printf "${FAILED}please specify ETCD cert and key location, Exiting....\n"
-        exit 1
-    elif [ -f $ETCDCERT ] && [ -f $ETCDKEY ]
-    then
-        printf "${NC}ETCD cert and key found, verifying the authenticity\n"
-        ETCDCERT_SUBJECT=$(openssl x509 -in $ETCDCERT -text | grep "Subject: CN"| tr -d " ")
-        ETCDCERT_ISSUER=$(openssl x509 -in $ETCDCERT -text | grep "Issuer: CN"| tr -d " ")
-        ETCDCERT_MD5=$(openssl x509 -noout -modulus -in $ETCDCERT | openssl md5| awk '{print $2}')
-        ETCDKEY_MD5=$(openssl rsa -noout -modulus -in $ETCDKEY | openssl md5| awk '{print $2}')
-        if [ $ETCDCERT_SUBJECT == "Subject:CN=etcd-server" ] && [ $ETCDCERT_ISSUER == "Issuer:CN=KUBERNETES-CA" ] && [ $ETCDCERT_MD5 == $ETCDKEY_MD5 ]
-        then
-            printf "${SUCCESS}etcd-server.crt / etcd-server.key are correct\n"
-        else
-            printf "${FAILED}Exiting...Found mismtach in the ETCD certificate and keys, More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/04-certificate-authority.md#the-etcd-server-certificate\n"
-            exit 1
-        fi
-    else
-        printf "${FAILED}etcd-server.crt / etcd-server.key is missing. More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/04-certificate-authority.md#the-etcd-server-certificate\n"
-        exit 1
-    fi
-}
-
-check_cert_sa()
-{
-    if [ -z $SACERT ] && [ -z $SAKEY ]
-    then
-        printf "${FAILED}please specify Service Account cert and key location, Exiting....\n"
-        exit 1
-    elif [ -f $SACERT ] && [ -f $SAKEY ]
-    then
-        printf "${NC}service account cert and key found, verifying the authenticity\n"
-        SACERT_SUBJECT=$(openssl x509 -in $SACERT -text | grep "Subject: CN"| tr -d " ")
-        SACERT_ISSUER=$(openssl x509 -in $SACERT -text | grep "Issuer: CN"| tr -d " ")
-        SACERT_MD5=$(openssl x509 -noout -modulus -in $SACERT | openssl md5| awk '{print $2}')
-        SAKEY_MD5=$(openssl rsa -noout -modulus -in $SAKEY | openssl md5| awk '{print $2}')
-        if [ $SACERT_SUBJECT == "Subject:CN=service-accounts" ] && [ $SACERT_ISSUER == "Issuer:CN=KUBERNETES-CA" ] && [ $SACERT_MD5 == $SAKEY_MD5 ]
-        then
-            printf "${SUCCESS}Service Account cert and key are correct\n"
-        else
-            printf "${FAILED}Exiting...Found mismtach in the Service Account certificate and keys, More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/04-certificate-authority.md#the-service-account-key-pair\n"
-            exit 1
-        fi
-    else
-        printf "${FAILED}service-account.crt / service-account.key is missing. More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/04-certificate-authority.md#the-service-account-key-pair\n"
-        exit 1
-    fi
-}
-
-check_cert_kpkubeconfig()
-{
-    if [ -z $KPKUBECONFIG ]
-    then
-        printf "${FAILED}please specify kube-proxy kubeconfig location\n"
-        exit 1
-    elif [ -f $KPKUBECONFIG ]
-    then
-        printf "${NC}kube-proxy kubeconfig file found, verifying the authenticity\n"
-        KPKUBECONFIG_SUBJECT=$(cat $KPKUBECONFIG | grep "client-certificate-data:" | awk '{print $2}' | base64 --decode | openssl x509 -text | grep "Subject: CN" | tr -d " ")
-        KPKUBECONFIG_ISSUER=$(cat $KPKUBECONFIG | grep "client-certificate-data:" | awk '{print $2}' | base64 --decode | openssl x509 -text | grep "Issuer: CN" | tr -d " ")
-        KPKUBECONFIG_CERT_MD5=$(cat $KPKUBECONFIG | grep "client-certificate-data:" | awk '{print $2}' | base64 --decode | openssl x509 -noout | openssl md5 | awk '{print $2}')
-        KPKUBECONFIG_KEY_MD5=$(cat $KPKUBECONFIG | grep "client-key-data" | awk '{print $2}' | base64 --decode | openssl rsa -noout | openssl md5 | awk '{print $2}')
-        KPKUBECONFIG_SERVER=$(cat $KPKUBECONFIG | grep "server:"| awk '{print $2}')
-        if [ $KPKUBECONFIG_SUBJECT == "Subject:CN=system:kube-proxy" ] && [ $KPKUBECONFIG_ISSUER == "Issuer:CN=KUBERNETES-CA" ] && [ $KPKUBECONFIG_CERT_MD5 == $KPKUBECONFIG_KEY_MD5 ] && [ $KPKUBECONFIG_SERVER == "https://192.168.5.30:6443" ]
-        then
-            printf "${SUCCESS}kube-proxy kubeconfig cert and key are correct\n"
-        else
-            printf "${FAILED}Exiting...Found mismtach in the kube-proxy kubeconfig certificate and keys, More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/05-kubernetes-configuration-files.md#the-kube-proxy-kubernetes-configuration-file\n"
-            exit 1
-        fi
-    else
-        printf "${FAILED}kube-proxy kubeconfig file is missing. More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/05-kubernetes-configuration-files.md#the-kube-proxy-kubernetes-configuration-file\n"
-        exit 1
-    fi
-}
-
-check_cert_kcmkubeconfig()
-{
-    if [ -z $KCMKUBECONFIG ]
-    then
-        printf "${FAILED}please specify kube-controller-manager kubeconfig location\n"
-        exit 1
-    elif [ -f $KCMKUBECONFIG ]
-    then
-        printf "${NC}kube-controller-manager kubeconfig file found, verifying the authenticity\n"
-        KCMKUBECONFIG_SUBJECT=$(cat $KCMKUBECONFIG | grep "client-certificate-data:" | awk '{print $2}' | base64 --decode | openssl x509 -text | grep "Subject: CN" | tr -d " ")
-        KCMKUBECONFIG_ISSUER=$(cat $KCMKUBECONFIG | grep "client-certificate-data:" | awk '{print $2}' | base64 --decode | openssl x509 -text | grep "Issuer: CN" | tr -d " ")
-        KCMKUBECONFIG_CERT_MD5=$(cat $KCMKUBECONFIG | grep "client-certificate-data:" | awk '{print $2}' | base64 --decode | openssl x509 -noout | openssl md5 | awk '{print $2}')
-        KCMKUBECONFIG_KEY_MD5=$(cat $KCMKUBECONFIG | grep "client-key-data" | awk '{print $2}' | base64 --decode | openssl rsa -noout | openssl md5 | awk '{print $2}')
-        KCMKUBECONFIG_SERVER=$(cat $KCMKUBECONFIG | grep "server:"| awk '{print $2}')
-        if [ $KCMKUBECONFIG_SUBJECT == "Subject:CN=system:kube-controller-manager" ] && [ $KCMKUBECONFIG_ISSUER == "Issuer:CN=KUBERNETES-CA" ] && [ $KCMKUBECONFIG_CERT_MD5 == $KCMKUBECONFIG_KEY_MD5 ] && [ $KCMKUBECONFIG_SERVER == "https://127.0.0.1:6443" ]
-        then
-            printf "${SUCCESS}kube-controller-manager kubeconfig cert and key are correct\n"
-        else
-            printf "${FAILED}Exiting...Found mismtach in the kube-controller-manager kubeconfig certificate and keys, More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/05-kubernetes-configuration-files.md#the-kube-controller-manager-kubernetes-configuration-file\n"
-            exit 1
-        fi
-    else
-        printf "${FAILED}kube-controller-manager kubeconfig file is missing. More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/05-kubernetes-configuration-files.md#the-kube-controller-manager-kubernetes-configuration-file\n"
-        exit 1
-    fi
-}
-
-check_cert_kskubeconfig()
-{
-    if [ -z $KSKUBECONFIG ]
-    then
-        printf "${FAILED}please specify kube-scheduler kubeconfig location\n"
-        exit 1
-    elif [ -f $KSKUBECONFIG ]
-    then
-        printf "${NC}kube-scheduler kubeconfig file found, verifying the authenticity\n"
-        KSKUBECONFIG_SUBJECT=$(cat $KSKUBECONFIG | grep "client-certificate-data:" | awk '{print $2}' | base64 --decode | openssl x509 -text | grep "Subject: CN" | tr -d " ")
-        KSKUBECONFIG_ISSUER=$(cat $KSKUBECONFIG | grep "client-certificate-data:" | awk '{print $2}' | base64 --decode | openssl x509 -text | grep "Issuer: CN" | tr -d " ")
-        KSKUBECONFIG_CERT_MD5=$(cat $KSKUBECONFIG | grep "client-certificate-data:" | awk '{print $2}' | base64 --decode | openssl x509 -noout | openssl md5 | awk '{print $2}')
-        KSKUBECONFIG_KEY_MD5=$(cat $KSKUBECONFIG | grep "client-key-data" | awk '{print $2}' | base64 --decode | openssl rsa -noout | openssl md5 | awk '{print $2}')
-        KSKUBECONFIG_SERVER=$(cat $KSKUBECONFIG | grep "server:"| awk '{print $2}')
-        if [ $KSKUBECONFIG_SUBJECT == "Subject:CN=system:kube-scheduler" ] && [ $KSKUBECONFIG_ISSUER == "Issuer:CN=KUBERNETES-CA" ] && [ $KSKUBECONFIG_CERT_MD5 == $KSKUBECONFIG_KEY_MD5 ] && [ $KSKUBECONFIG_SERVER == "https://127.0.0.1:6443" ]
-        then
-            printf "${SUCCESS}kube-scheduler kubeconfig cert and key are correct\n"
-        else
-            printf "${FAILED}Exiting...Found mismtach in the kube-scheduler kubeconfig certificate and keys, More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/05-kubernetes-configuration-files.md#the-kube-scheduler-kubernetes-configuration-file\n"
-            exit 1
-        fi
-    else
-        printf "${FAILED}kube-scheduler kubeconfig file is missing. More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/05-kubernetes-configuration-files.md#the-kube-scheduler-kubernetes-configuration-file\n"
-        exit 1
-    fi
-}
@@ -387,17 +167,17 @@ check_cert_adminkubeconfig()
 {
     if [ -z $ADMINKUBECONFIG ]
     then
-        printf "${FAILED}please specify admin kubeconfig location\n"
+        printf "${FAILED}please specify admin kubeconfig location\n${NC}"
         exit 1
     elif [ -f $ADMINKUBECONFIG ]
     then
         printf "${NC}admin kubeconfig file found, verifying the authenticity\n"
-        ADMINKUBECONFIG_SUBJECT=$(cat $ADMINKUBECONFIG | grep "client-certificate-data:" | awk '{print $2}' | base64 --decode | openssl x509 -text | grep "Subject: CN" | tr -d " ")
-        ADMINKUBECONFIG_ISSUER=$(cat $ADMINKUBECONFIG | grep "client-certificate-data:" | awk '{print $2}' | base64 --decode | openssl x509 -text | grep "Issuer: CN" | tr -d " ")
-        ADMINKUBECONFIG_CERT_MD5=$(cat $ADMINKUBECONFIG | grep "client-certificate-data:" | awk '{print $2}' | base64 --decode | openssl x509 -noout | openssl md5 | awk '{print $2}')
+        ADMINKUBECONFIG_SUBJECT=$(cat $ADMINKUBECONFIG | grep "client-certificate-data:" | awk '{print $2}' | base64 --decode | sudo openssl x509 -text | grep "Subject: CN" | tr -d " ")
+        ADMINKUBECONFIG_ISSUER=$(cat $ADMINKUBECONFIG | grep "client-certificate-data:" | awk '{print $2}' | base64 --decode | sudo openssl x509 -text | grep "Issuer: CN" | tr -d " ")
+        ADMINKUBECONFIG_CERT_MD5=$(cat $ADMINKUBECONFIG | grep "client-certificate-data:" | awk '{print $2}' | base64 --decode | sudo openssl x509 -noout | openssl md5 | awk '{print $2}')
         ADMINKUBECONFIG_KEY_MD5=$(cat $ADMINKUBECONFIG | grep "client-key-data" | awk '{print $2}' | base64 --decode | openssl rsa -noout | openssl md5 | awk '{print $2}')
         ADMINKUBECONFIG_SERVER=$(cat $ADMINKUBECONFIG | grep "server:"| awk '{print $2}')
-        if [ $ADMINKUBECONFIG_SUBJECT == "Subject:CN=admin,O=system:masters" ] && [ $ADMINKUBECONFIG_ISSUER == "Issuer:CN=KUBERNETES-CA" ] && [ $ADMINKUBECONFIG_CERT_MD5 == $ADMINKUBECONFIG_KEY_MD5 ] && [ $ADMINKUBECONFIG_SERVER == "https://127.0.0.1:6443" ]
+        if [ $ADMINKUBECONFIG_SUBJECT == "Subject:CN=admin,O=system:masters" ] && [ $ADMINKUBECONFIG_ISSUER == "Issuer:CN=KUBERNETES-CA,O=Kubernetes" ] && [ $ADMINKUBECONFIG_CERT_MD5 == $ADMINKUBECONFIG_KEY_MD5 ] && [ $ADMINKUBECONFIG_SERVER == "https://127.0.0.1:6443" ]
         then
             printf "${SUCCESS}admin kubeconfig cert and key are correct\n"
         else

@@ -410,11 +190,81 @@ check_cert_adminkubeconfig()
         fi
     fi
 }
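The kubeconfig checks above all share one extraction idiom: pull the base64 field out of the YAML, decode it, and hand the result to openssl. The same step interactively, assuming an admin.kubeconfig with embedded certificate data:

```bash
grep "client-certificate-data:" admin.kubeconfig \
    | awk '{print $2}' | base64 --decode \
    | openssl x509 -noout -subject -issuer
```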
+get_kubeconfig_cert_path()
+{
+    local kubeconfig=$1
+    local cert_field=$2
+
+    sudo cat $kubeconfig | grep "$cert_field" | awk '{print $2}'
+}
+
+check_kubeconfig()
+{
+    local name=$1
+    local location=$2
+    local apiserver=$3
+    local kubeconfig="${location}/${name}.kubeconfig"
+
+    echo "Checking $kubeconfig"
+    check_kubeconfig_exists $name $location
+    ca=$(get_kubeconfig_cert_path $kubeconfig "certificate-authority")
+    cert=$(get_kubeconfig_cert_path $kubeconfig "client-certificate")
+    key=$(get_kubeconfig_cert_path $kubeconfig "client-key")
+    server=$(sudo cat $kubeconfig | grep server | awk '{print $2}')
+
+    if [ -f "$ca" ]
+    then
+        printf "${SUCCESS}Path to CA certificate is correct${NC}\n"
+    else
+        printf "${FAILED}CA certificate not found at ${ca}${NC}\n"
+        exit 1
+    fi
+
+    if [ -f "$cert" ]
+    then
+        printf "${SUCCESS}Path to client certificate is correct${NC}\n"
+    else
+        printf "${FAILED}Client certificate not found at ${cert}${NC}\n"
+        exit 1
+    fi
+
+    if [ -f "$key" ]
+    then
+        printf "${SUCCESS}Path to client key is correct${NC}\n"
+    else
+        printf "${FAILED}Client key not found at ${key}${NC}\n"
+        exit 1
+    fi
+
+    if [ "$apiserver" = "$server" ]
+    then
+        printf "${SUCCESS}Server URL is correct${NC}\n"
+    else
+        printf "${FAILED}Server URL ${server} is incorrect${NC}\n"
+        exit 1
+    fi
+}
+
+check_kubeconfig_exists() {
+    local name=$1
+    local location=$2
+    local kubeconfig="${location}/${name}.kubeconfig"
+
+    if [ -f "${kubeconfig}" ]
+    then
+        printf "${SUCCESS}${kubeconfig} found${NC}\n"
+    else
+        printf "${FAILED}${kubeconfig} not found!${NC}\n"
+        exit 1
+    fi
+}
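A plausible invocation of the new helper, using the kubeconfig locations and server URL this script compares against elsewhere:

```bash
check_kubeconfig "kube-scheduler" "/var/lib/kubernetes" "https://127.0.0.1:6443"
check_kubeconfig "kube-controller-manager" "/var/lib/kubernetes" "https://127.0.0.1:6443"
```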
 check_systemd_etcd()
 {
     if [ -z $ETCDCERT ] && [ -z $ETCDKEY ]
     then
-        printf "${FAILED}please specify ETCD cert and key location, Exiting....\n"
+        printf "${FAILED}please specify ETCD cert and key location, Exiting....\n${NC}"
         exit 1
     elif [ -f $SYSTEMD_ETCD_FILE ]
     then

@@ -430,7 +280,7 @@ check_systemd_etcd()
         PEER_TRUSTED_CA_FILE=$(systemctl cat etcd.service | grep "\--peer-trusted-ca-file"| awk '{print $1}'| cut -d "=" -f2)

         # Systemd advertise, client and peer URLs
-        INTERNAL_IP=$(ip addr show enp0s8 | grep "inet " | awk '{print $2}' | cut -d / -f 1)
         IAP_URL=$(systemctl cat etcd.service | grep "\--initial-advertise-peer-urls"| awk '{print $2}')
         LP_URL=$(systemctl cat etcd.service | grep "\--listen-peer-urls"| awk '{print $2}')
         LC_URL=$(systemctl cat etcd.service | grep "\--listen-client-urls"| awk '{print $2}')

@@ -443,23 +293,23 @@ check_systemd_etcd()
         if [ $CERT_FILE == $ETCDCERT ] && [ $KEY_FILE == $ETCDKEY ] && [ $PEER_CERT_FILE == $ETCDCERT ] && [ $PEER_KEY_FILE == $ETCDKEY ] && \
            [ $TRUSTED_CA_FILE == $ETCD_CA_CERT ] && [ $PEER_TRUSTED_CA_FILE = $ETCD_CA_CERT ]
         then
-            printf "${SUCCESS}ETCD certificate, ca and key files are correct under systemd service\n"
+            printf "${SUCCESS}ETCD certificate, ca and key files are correct under systemd service\n${NC}"
         else
-            printf "${FAILED}Exiting...Found mismtach in the ETCD certificate, ca and keys. More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/07-bootstrapping-etcd.md#configure-the-etcd-server\n"
+            printf "${FAILED}Exiting...Found mismatch in the ETCD certificate, ca and keys. More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/07-bootstrapping-etcd.md#configure-the-etcd-server\n${NC}"
             exit 1
         fi

         if [ $IAP_URL == "https://$INTERNAL_IP:2380" ] && [ $LP_URL == "https://$INTERNAL_IP:2380" ] && [ $LC_URL == "https://$INTERNAL_IP:2379,https://127.0.0.1:2379" ] && \
            [ $AC_URL == "https://$INTERNAL_IP:2379" ]
         then
-            printf "${SUCCESS}ETCD initial-advertise-peer-urls, listen-peer-urls, listen-client-urls, advertise-client-urls are correct\n"
+            printf "${SUCCESS}ETCD initial-advertise-peer-urls, listen-peer-urls, listen-client-urls, advertise-client-urls are correct\n${NC}"
         else
-            printf "${FAILED}Exiting...Found mismtach in the ETCD initial-advertise-peer-urls / listen-peer-urls / listen-client-urls / advertise-client-urls. More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/07-bootstrapping-etcd.md#configure-the-etcd-server\n"
+            printf "${FAILED}Exiting...Found mismatch in the ETCD initial-advertise-peer-urls / listen-peer-urls / listen-client-urls / advertise-client-urls. More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/07-bootstrapping-etcd.md#configure-the-etcd-server\n${NC}"
             exit 1
         fi

     else
-        printf "${FAILED}etcd-server.crt / etcd-server.key is missing. More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/07-bootstrapping-etcd.md#configure-the-etcd-server\n"
+        printf "${FAILED}etcd-server.crt / etcd-server.key is missing. More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/07-bootstrapping-etcd.md#configure-the-etcd-server\n${NC}"
         exit 1
     fi
 }
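The systemd checks parse flags straight out of the live unit file rather than trusting a copy on disk. The same extraction by hand:

```bash
systemctl cat etcd.service | grep -- "--listen-client-urls"
# expected form: --listen-client-urls=https://<INTERNAL_IP>:2379,https://127.0.0.1:2379
```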
@@ -468,13 +318,12 @@ check_systemd_api()
 {
     if [ -z $APICERT ] && [ -z $APIKEY ]
     then
-        printf "${FAILED}please specify kube-api cert and key location, Exiting....\n"
+        printf "${FAILED}please specify kube-api cert and key location, Exiting....\n${NC}"
         exit 1
     elif [ -f $SYSTEMD_API_FILE ]
     then
-        printf "${NC}Systemd for kube-api service found, verifying the authenticity\n"
-        INTERNAL_IP=$(ip addr show enp0s8 | grep "inet " | awk '{print $2}' | cut -d / -f 1)
+        printf "Systemd for kube-api service found, verifying the authenticity\n"
         ADVERTISE_ADDRESS=$(systemctl cat kube-apiserver.service | grep "\--advertise-address" | awk '{print $1}' | cut -d "=" -f2)
         CLIENT_CA_FILE=$(systemctl cat kube-apiserver.service | grep "\--client-ca-file" | awk '{print $1}' | cut -d "=" -f2)
         ETCD_CA_FILE=$(systemctl cat kube-apiserver.service | grep "\--etcd-cafile" | awk '{print $1}' | cut -d "=" -f2)

@@ -487,41 +336,44 @@ check_systemd_api()
         TLS_CERT_FILE=$(systemctl cat kube-apiserver.service | grep "\--tls-cert-file" | awk '{print $1}' | cut -d "=" -f2)
         TLS_PRIVATE_KEY_FILE=$(systemctl cat kube-apiserver.service | grep "\--tls-private-key-file" | awk '{print $1}' | cut -d "=" -f2)

-        CACERT=/var/lib/kubernetes/ca.crt
-        APICERT=/var/lib/kubernetes/kube-apiserver.crt
-        APIKEY=/var/lib/kubernetes/kube-apiserver.key
-        SACERT=/var/lib/kubernetes/service-account.crt
+        PKI=/var/lib/kubernetes/pki
+        CACERT="${PKI}/ca.crt"
+        APICERT="${PKI}/kube-apiserver.crt"
+        APIKEY="${PKI}/kube-apiserver.key"
+        SACERT="${PKI}/service-account.crt"
+        KCCERT="${PKI}/apiserver-kubelet-client.crt"
+        KCKEY="${PKI}/apiserver-kubelet-client.key"
         if [ $ADVERTISE_ADDRESS == $INTERNAL_IP ] && [ $CLIENT_CA_FILE == $CACERT ] && [ $ETCD_CA_FILE == $CACERT ] && \
-           [ $ETCD_CERT_FILE == "/var/lib/kubernetes/etcd-server.crt" ] && [ $ETCD_KEY_FILE == "/var/lib/kubernetes/etcd-server.key" ] && \
-           [ $KUBELET_CERTIFICATE_AUTHORITY == $CACERT ] && [ $KUBELET_CLIENT_CERTIFICATE == $APICERT ] && [ $KUBELET_CLIENT_KEY == $APIKEY ] && \
+           [ $ETCD_CERT_FILE == "${PKI}/etcd-server.crt" ] && [ $ETCD_KEY_FILE == "${PKI}/etcd-server.key" ] && \
+           [ $KUBELET_CERTIFICATE_AUTHORITY == $CACERT ] && [ $KUBELET_CLIENT_CERTIFICATE == $KCCERT ] && [ $KUBELET_CLIENT_KEY == $KCKEY ] && \
            [ $SERVICE_ACCOUNT_KEY_FILE == $SACERT ] && [ $TLS_CERT_FILE == $APICERT ] && [ $TLS_PRIVATE_KEY_FILE == $APIKEY ]
         then
-            printf "${SUCCESS}kube-apiserver advertise-address/ client-ca-file/ etcd-cafile/ etcd-certfile/ etcd-keyfile/ kubelet-certificate-authority/ kubelet-client-certificate/ kubelet-client-key/ service-account-key-file/ tls-cert-file/ tls-private-key-file are correct\n"
+            printf "${SUCCESS}kube-apiserver advertise-address/ client-ca-file/ etcd-cafile/ etcd-certfile/ etcd-keyfile/ kubelet-certificate-authority/ kubelet-client-certificate/ kubelet-client-key/ service-account-key-file/ tls-cert-file/ tls-private-key-file are correct\n${NC}"
         else
-            printf "${FAILED}Exiting...Found mismtach in the kube-apiserver systemd file, check advertise-address/ client-ca-file/ etcd-cafile/ etcd-certfile/ etcd-keyfile/ kubelet-certificate-authority/ kubelet-client-certificate/ kubelet-client-key/ service-account-key-file/ tls-cert-file/ tls-private-key-file. More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/08-bootstrapping-kubernetes-controllers.md#configure-the-kubernetes-api-server\n"
+            printf "${FAILED}Exiting...Found mismatch in the kube-apiserver systemd file, check advertise-address/ client-ca-file/ etcd-cafile/ etcd-certfile/ etcd-keyfile/ kubelet-certificate-authority/ kubelet-client-certificate/ kubelet-client-key/ service-account-key-file/ tls-cert-file/ tls-private-key-file. More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/08-bootstrapping-kubernetes-controllers.md#configure-the-kubernetes-api-server\n${NC}"
             exit 1
         fi
     else
-        printf "${FAILED}kube-apiserver.crt / kube-apiserver.key is missing. More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/08-bootstrapping-kubernetes-controllers.md#configure-the-kubernetes-api-server\n"
+        printf "${FAILED}kube-apiserver.crt / kube-apiserver.key is missing. More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/08-bootstrapping-kubernetes-controllers.md#configure-the-kubernetes-api-server\n${NC}"
         exit 1
     fi
 }
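Everything the API-server check now expects sits under the single PKI directory introduced in this change. A quick presence check (a sketch; it assumes the copy steps from the earlier docs completed):

```bash
ls -l /var/lib/kubernetes/pki/{ca.crt,kube-apiserver.crt,kube-apiserver.key,service-account.crt,apiserver-kubelet-client.crt,apiserver-kubelet-client.key}
```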
 check_systemd_kcm()
 {
-    KCMCERT=/var/lib/kubernetes/kube-controller-manager.crt
-    KCMKEY=/var/lib/kubernetes/kube-controller-manager.key
-    CACERT=/var/lib/kubernetes/ca.crt
-    CAKEY=/var/lib/kubernetes/ca.key
-    SAKEY=/var/lib/kubernetes/service-account.key
+    KCMCERT=/var/lib/kubernetes/pki/kube-controller-manager.crt
+    KCMKEY=/var/lib/kubernetes/pki/kube-controller-manager.key
+    CACERT=/var/lib/kubernetes/pki/ca.crt
+    CAKEY=/var/lib/kubernetes/pki/ca.key
+    SAKEY=/var/lib/kubernetes/pki/service-account.key
     KCMKUBECONFIG=/var/lib/kubernetes/kube-controller-manager.kubeconfig
     if [ -z $KCMCERT ] && [ -z $KCMKEY ]
     then
-        printf "${FAILED}please specify cert and key location\n"
+        printf "${FAILED}please specify cert and key location\n${NC}"
         exit 1
     elif [ -f $SYSTEMD_KCM_FILE ]
     then
-        printf "${NC}Systemd for kube-controller-manager service found, verifying the authenticity\n"
+        printf "Systemd for kube-controller-manager service found, verifying the authenticity\n"
         CLUSTER_SIGNING_CERT_FILE=$(systemctl cat kube-controller-manager.service | grep "\--cluster-signing-cert-file" | awk '{print $1}' | cut -d "=" -f2)
         CLUSTER_SIGNING_KEY_FILE=$(systemctl cat kube-controller-manager.service | grep "\--cluster-signing-key-file" | awk '{print $1}' | cut -d "=" -f2)
         KUBECONFIG=$(systemctl cat kube-controller-manager.service | grep "\--kubeconfig" | awk '{print $1}' | cut -d "=" -f2)

@@ -531,242 +383,180 @@ check_systemd_kcm()
         if [ $CLUSTER_SIGNING_CERT_FILE == $CACERT ] && [ $CLUSTER_SIGNING_KEY_FILE == $CAKEY ] && [ $KUBECONFIG == $KCMKUBECONFIG ] && \
            [ $ROOT_CA_FILE == $CACERT ] && [ $SERVICE_ACCOUNT_PRIVATE_KEY_FILE == $SAKEY ]
         then
-            printf "${SUCCESS}kube-controller-manager cluster-signing-cert-file, cluster-signing-key-file, kubeconfig, root-ca-file, service-account-private-key-file are correct\n"
+            printf "${SUCCESS}kube-controller-manager cluster-signing-cert-file, cluster-signing-key-file, kubeconfig, root-ca-file, service-account-private-key-file are correct\n${NC}"
         else
-            printf "${FAILED}Exiting...Found mismtach in the kube-controller-manager cluster-signing-cert-file, cluster-signing-key-file, kubeconfig, root-ca-file, service-account-private-key-file ,More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/08-bootstrapping-kubernetes-controllers.md#configure-the-kubernetes-controller-manager\n"
+            printf "${FAILED}Exiting...Found mismatch in the kube-controller-manager cluster-signing-cert-file, cluster-signing-key-file, kubeconfig, root-ca-file, service-account-private-key-file. More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/08-bootstrapping-kubernetes-controllers.md#configure-the-kubernetes-controller-manager\n${NC}"
             exit 1
         fi
     else
-        printf "${FAILED}kube-controller-manager.crt / kube-controller-manager.key is missing. More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/08-bootstrapping-kubernetes-controllers.md#configure-the-kubernetes-controller-manager\n"
+        printf "${FAILED}kube-controller-manager.crt / kube-controller-manager.key is missing. More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/08-bootstrapping-kubernetes-controllers.md#configure-the-kubernetes-controller-manager\n${NC}"
         exit 1
     fi
 }
 check_systemd_ks()
 {
-    KSCERT=/var/lib/kubernetes/kube-scheduler.crt
-    KSKEY=/var/lib/kubernetes/kube-scheduler.key
+    KSCERT=/var/lib/kubernetes/pki/kube-scheduler.crt
+    KSKEY=/var/lib/kubernetes/pki/kube-scheduler.key
     KSKUBECONFIG=/var/lib/kubernetes/kube-scheduler.kubeconfig

     if [ -z $KSCERT ] && [ -z $KSKEY ]
     then
-        printf "${FAILED}please specify cert and key location\n"
+        printf "${FAILED}please specify cert and key location\n${NC}"
         exit 1
     elif [ -f $SYSTEMD_KS_FILE ]
     then
-        printf "${NC}Systemd for kube-scheduler service found, verifying the authenticity\n"
+        printf "Systemd for kube-scheduler service found, verifying the authenticity\n"

         KUBECONFIG=$(systemctl cat kube-scheduler.service | grep "\--kubeconfig"| awk '{print $1}'| cut -d "=" -f2)
-        ADDRESS=$(systemctl cat kube-scheduler.service | grep "\--address"| awk '{print $1}'| cut -d "=" -f2)

-        if [ $KUBECONFIG == $KSKUBECONFIG ] && [ $ADDRESS == "127.0.0.1" ]
+        if [ $KUBECONFIG == $KSKUBECONFIG ]
         then
-            printf "${SUCCESS}kube-scheduler --kubeconfig, --address are correct\n"
+            printf "${SUCCESS}kube-scheduler --kubeconfig is correct\n${NC}"
         else
-            printf "${FAILED}Exiting...Found mismtach in the kube-scheduler --kubeconfig, --address, More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/08-bootstrapping-kubernetes-controllers.md#configure-the-kubernetes-scheduler\n"
+            printf "${FAILED}Exiting...Found mismatch in the kube-scheduler --kubeconfig. More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/08-bootstrapping-kubernetes-controllers.md#configure-the-kubernetes-scheduler\n${NC}"
             exit 1
         fi
     else
-        printf "${FAILED}kube-scheduler.crt / kube-scheduler.key is missing. More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/08-bootstrapping-kubernetes-controllers.md#configure-the-kubernetes-scheduler\n"
+        printf "${FAILED}kube-scheduler.crt / kube-scheduler.key is missing. More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/08-bootstrapping-kubernetes-controllers.md#configure-the-kubernetes-scheduler\n${NC}"
        exit 1
     fi
 }
|
|
||||||
# END OF Function - Master node #
|
# END OF Function - Master node #
|
||||||
|
|
||||||
# Function - Worker-1 node #
|
|
||||||
|
|
||||||
check_cert_worker_1()
|
echo "This script will validate the certificates in master as well as worker-1 nodes. Before proceeding, make sure you ssh into the respective node [ Master or Worker-1 ] for certificate validation"
|
||||||
{
|
echo
|
||||||
if [ -z $WORKER_1_CERT ] && [ -z $WORKER_1_KEY ]
|
echo " 1. Verify certificates on Master Nodes after step 4"
|
||||||
then
|
echo " 2. Verify kubeconfigs on Master Nodes after step 5"
|
||||||
printf "${FAILED}please specify cert and key location of worker-1 node\n"
|
echo " 3. Verify kubeconfigs and PKI on Master Nodes after step 8"
|
||||||
exit 1
|
echo " 4. Verify kubeconfigs and PKI on worker-1 Node after step 10"
|
||||||
elif [ -f $WORKER_1_CERT ] && [ -f $WORKER_1_KEY ]
|
echo " 5. Verify kubeconfigs and PKI on worker-2 Node after step 11"
|
||||||
then
|
echo
|
||||||
printf "${NC}worker-1 cert and key found, verifying the authenticity\n"
|
echo -n "Please select one of the above options: "
|
||||||
WORKER_1_CERT_SUBJECT=$(openssl x509 -in $WORKER_1_CERT -text | grep "Subject: CN"| tr -d " ")
|
|
||||||
WORKER_1_CERT_ISSUER=$(openssl x509 -in $WORKER_1_CERT -text | grep "Issuer: CN"| tr -d " ")
|
|
||||||
WORKER_1_CERT_MD5=$(openssl x509 -noout -modulus -in $WORKER_1_CERT | openssl md5| awk '{print $2}')
|
|
||||||
WORKER_1_KEY_MD5=$(openssl rsa -noout -modulus -in $WORKER_1_KEY | openssl md5| awk '{print $2}')
|
|
||||||
if [ $WORKER_1_CERT_SUBJECT == "Subject:CN=system:node:worker-1,O=system:nodes" ] && [ $WORKER_1_CERT_ISSUER == "Issuer:CN=KUBERNETES-CA" ] && [ $WORKER_1_CERT_MD5 == $WORKER_1_KEY_MD5 ]
|
|
||||||
then
|
|
||||||
printf "${SUCCESS}worker-1 cert and key are correct\n"
|
|
||||||
else
|
|
||||||
printf "${FAILED}Exiting...Found mismtach in the worker-1 certificate and keys, More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/09-bootstrapping-kubernetes-workers.md#provisioning--kubelet-client-certificates\n"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
else
|
|
||||||
printf "${FAILED}/var/lib/kubelet/worker-1.crt / /var/lib/kubelet/worker-1.key is missing. More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/09-bootstrapping-kubernetes-workers.md#provisioning--kubelet-client-certificates\n"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
check_cert_worker_1_kubeconfig()
|
|
||||||
{
|
|
||||||
if [ -z $WORKER_1_KUBECONFIG ]
|
|
||||||
then
|
|
||||||
printf "${FAILED}please specify worker-1 kubeconfig location\n"
|
|
||||||
exit 1
|
|
||||||
elif [ -f $WORKER_1_KUBECONFIG ]
|
|
||||||
then
|
|
||||||
printf "${NC}worker-1 kubeconfig file found, verifying the authenticity\n"
|
|
||||||
WORKER_1_KUBECONFIG_SUBJECT=$(cat $WORKER_1_KUBECONFIG | grep "client-certificate-data:" | awk '{print $2}' | base64 --decode | openssl x509 -text | grep "Subject: CN" | tr -d " ")
|
|
||||||
WORKER_1_KUBECONFIG_ISSUER=$(cat $WORKER_1_KUBECONFIG | grep "client-certificate-data:" | awk '{print $2}' | base64 --decode | openssl x509 -text | grep "Issuer: CN" | tr -d " ")
|
|
||||||
WORKER_1_KUBECONFIG_CERT_MD5=$(cat $WORKER_1_KUBECONFIG | grep "client-certificate-data:" | awk '{print $2}' | base64 --decode | openssl x509 -noout | openssl md5 | awk '{print $2}')
|
|
||||||
WORKER_1_KUBECONFIG_KEY_MD5=$(cat $WORKER_1_KUBECONFIG | grep "client-key-data" | awk '{print $2}' | base64 --decode | openssl rsa -noout | openssl md5 | awk '{print $2}')
|
|
||||||
WORKER_1_KUBECONFIG_SERVER=$(cat $WORKER_1_KUBECONFIG | grep "server:"| awk '{print $2}')
|
|
||||||
if [ $WORKER_1_KUBECONFIG_SUBJECT == "Subject:CN=system:node:worker-1,O=system:nodes" ] && [ $WORKER_1_KUBECONFIG_ISSUER == "Issuer:CN=KUBERNETES-CA" ] && \
|
|
||||||
[ $WORKER_1_KUBECONFIG_CERT_MD5 == $WORKER_1_KUBECONFIG_KEY_MD5 ] && [ $WORKER_1_KUBECONFIG_SERVER == "https://192.168.5.30:6443" ]
|
|
||||||
then
|
|
||||||
printf "${SUCCESS}worker-1 kubeconfig cert and key are correct\n"
|
|
||||||
else
|
|
||||||
printf "${FAILED}Exiting...Found mismtach in the worker-1 kubeconfig certificate and keys, More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/09-bootstrapping-kubernetes-workers.md#the-kubelet-kubernetes-configuration-file\n"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
else
|
|
||||||
printf "${FAILED}worker-1 /var/lib/kubelet/kubeconfig file is missing. More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/09-bootstrapping-kubernetes-workers.md#the-kubelet-kubernetes-configuration-file\n"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
-check_cert_worker_1_kubelet()
-{
-    CACERT=/var/lib/kubernetes/ca.crt
-    WORKER_1_TLSCERTFILE=/var/lib/kubelet/${HOSTNAME}.crt
-    WORKER_1_TLSPRIVATEKEY=/var/lib/kubelet/${HOSTNAME}.key
-
-    if [ -z $WORKER_1_KUBELET ] && [ -z $SYSTEMD_WORKER_1_KUBELET ]
-    then
-        printf "${FAILED}please specify worker-1 kubelet config location\n"
-        exit 1
-    elif [ -f $WORKER_1_KUBELET ] && [ -f $SYSTEMD_WORKER_1_KUBELET ] && [ -f $WORKER_1_TLSCERTFILE ] && [ -f $WORKER_1_TLSPRIVATEKEY ]
-    then
-        printf "${NC}worker-1 kubelet config file, systemd services, tls cert and key found, verifying the authenticity\n"
-
-        WORKER_1_KUBELET_CA=$(cat $WORKER_1_KUBELET | grep "clientCAFile:" | awk '{print $2}' | tr -d " \"")
-        WORKER_1_KUBELET_DNS=$(cat $WORKER_1_KUBELET | grep "resolvConf:" | awk '{print $2}' | tr -d " \"")
-        WORKER_1_KUBELET_AUTH_MODE=$(cat $WORKER_1_KUBELET | grep "mode:" | awk '{print $2}' | tr -d " \"")
-
-        if [ $WORKER_1_KUBELET_CA == $CACERT ] && [ $WORKER_1_KUBELET_DNS == "/run/systemd/resolve/resolv.conf" ] && \
-           [ $WORKER_1_KUBELET_AUTH_MODE == "Webhook" ]
-        then
-            printf "${SUCCESS}worker-1 kubelet config CA cert, resolvConf and Auth mode are correct\n"
-        else
-            printf "${FAILED}Exiting...Found mismatch in the worker-1 kubelet config CA cert, resolvConf and Auth mode. More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/09-bootstrapping-kubernetes-workers.md#configure-the-kubelet\n"
-            exit 1
-        fi
-
-        KUBELETCONFIG=$(systemctl cat kubelet.service | grep "\--config" | awk '{print $1}' | cut -d "=" -f2)
-        TLSCERTFILE=$(systemctl cat kubelet.service | grep "\--tls-cert-file" | awk '{print $1}' | cut -d "=" -f2)
-        TLSPRIVATEKEY=$(systemctl cat kubelet.service | grep "\--tls-private-key-file" | awk '{print $1}' | cut -d "=" -f2)
-
-        if [ $KUBELETCONFIG == $WORKER_1_KUBELET ] && [ $TLSCERTFILE == $WORKER_1_TLSCERTFILE ] && \
-           [ $TLSPRIVATEKEY == $WORKER_1_TLSPRIVATEKEY ]
-        then
-            printf "${SUCCESS}worker-1 kubelet systemd services are correct\n"
-        else
-            printf "${FAILED}Exiting...Found mismatch in the worker-1 kubelet systemd services. More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/09-bootstrapping-kubernetes-workers.md#configure-the-kubelet\n"
-            exit 1
-        fi
-    else
-        printf "${FAILED}worker-1 kubelet config, systemd services, tls cert and key files are missing. More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/09-bootstrapping-kubernetes-workers.md\n"
-        exit 1
-    fi
-}
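The systemd half of that check is just flag scraping from the unit file; the same idea as a standalone helper (the helper name is hypothetical, the unit and flag names are the ones this lab uses):

```bash
# Sketch: pull a --flag=value setting out of a systemd unit.
# Assumes one flag per line in the unit file, as in this lab's kubelet.service.
get_unit_flag() {
  local unit=$1 flag=$2
  systemctl cat "$unit" | grep -- "$flag" | awk '{print $1}' | cut -d "=" -f2
}

get_unit_flag kubelet.service --config          # e.g. /var/lib/kubelet/kubelet-config.yaml
get_unit_flag kubelet.service --tls-cert-file   # e.g. /var/lib/kubelet/worker-1.crt
```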
-check_cert_worker_1_kp()
-{
-    WORKER_1_KP_CONFIG_YAML=/var/lib/kube-proxy/kube-proxy-config.yaml
-
-    if [ -z $WORKER_1_KP_KUBECONFIG ] && [ -z $SYSTEMD_WORKER_1_KP ]
-    then
-        printf "${FAILED}please specify worker-1 kube-proxy config and systemd service path\n"
-        exit 1
-    elif [ -f $WORKER_1_KP_KUBECONFIG ] && [ -f $SYSTEMD_WORKER_1_KP ] && [ -f $WORKER_1_KP_CONFIG_YAML ]
-    then
-        printf "${NC}worker-1 kube-proxy kubeconfig, systemd services and configuration files found, verifying the authenticity\n"
-
-        KP_CONFIG=$(cat $WORKER_1_KP_CONFIG_YAML | grep "kubeconfig:" | awk '{print $2}' | tr -d " \"")
-        KP_CONFIG_YAML=$(systemctl cat kube-proxy.service | grep "\--config" | awk '{print $1}' | cut -d "=" -f2)
-
-        if [ $KP_CONFIG == $WORKER_1_KP_KUBECONFIG ] && [ $KP_CONFIG_YAML == $WORKER_1_KP_CONFIG_YAML ]
-        then
-            printf "${SUCCESS}worker-1 kube-proxy kubeconfig and configuration files are correct\n"
-        else
-            printf "${FAILED}Exiting...Found mismatch in the worker-1 kube-proxy kubeconfig and configuration files. More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/09-bootstrapping-kubernetes-workers.md#configure-the-kubernetes-proxy\n"
-            exit 1
-        fi
-    else
-        printf "${FAILED}worker-1 kube-proxy kubeconfig and configuration files are missing. More details: https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/09-bootstrapping-kubernetes-workers.md#configure-the-kubernetes-proxy\n"
-        exit 1
-    fi
-}
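For reference, the kube-proxy config file this function greps has roughly the following shape; only the kubeconfig: line matters to the check, and the other values here are illustrative rather than taken from the repo:

```bash
# Sketch: the kind of file check_cert_worker_1_kp expects to find.
cat > /var/lib/kube-proxy/kube-proxy-config.yaml <<EOF
kind: KubeProxyConfiguration
apiVersion: kubeproxy.config.k8s.io/v1alpha1
clientConnection:
  kubeconfig: /var/lib/kube-proxy/kube-proxy.kubeconfig
mode: iptables
EOF
```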
-# END OF Function - Worker-1 node #
echo -e "This script will validate the certificates in master as well as worker-1 nodes. Before proceeding, make sure you ssh into the respective node [ Master or Worker-1 ] for certificate validation\n"
|
|
||||||
echo -e "1. Verify certification in Master Node\n"
|
|
||||||
echo -e "2. Verify certification in Worker-1 Node\n"
|
|
||||||
echo -e "Please select either the option 1 or 2\n"
|
|
||||||
read value
|
read value
|
||||||
|
|
||||||
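Incidentally, the prompt-and-read pair can be collapsed into one built-in; a suggestion only, not part of the diff:

```bash
# -r stops backslashes being interpreted; -p prints the prompt inline.
read -r -p "Please select an option: " value
```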
+HOST=$(hostname -s)
+
+CERT_ISSUER="Issuer:CN=KUBERNETES-CA,O=Kubernetes"
+SUBJ_CA="Subject:CN=KUBERNETES-CA,O=Kubernetes"
+SUBJ_ADMIN="Subject:CN=admin,O=system:masters"
+SUBJ_KCM="Subject:CN=system:kube-controller-manager,O=system:kube-controller-manager"
+SUBJ_KP="Subject:CN=system:kube-proxy,O=system:node-proxier"
+SUBJ_KS="Subject:CN=system:kube-scheduler,O=system:kube-scheduler"
+SUBJ_API="Subject:CN=kube-apiserver,O=Kubernetes"
+SUBJ_SA="Subject:CN=service-accounts,O=Kubernetes"
+SUBJ_ETCD="Subject:CN=etcd-server,O=Kubernetes"
+SUBJ_APIKC="Subject:CN=kube-apiserver-kubelet-client,O=system:masters"
+
 case $value in
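These expected strings line up with openssl's text output once spaces are stripped, which is presumably how the check helpers normalise the live certificates, roughly:

```bash
# Sketch: normalise a cert's subject/issuer into the comparison format above.
openssl x509 -in kube-apiserver.crt -text -noout | grep "Subject: " | tr -d " "
# -> Subject:CN=kube-apiserver,O=Kubernetes
openssl x509 -in kube-apiserver.crt -text -noout | grep "Issuer: " | tr -d " "
# -> Issuer:CN=KUBERNETES-CA,O=Kubernetes
```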
 1)
+    if ! [ "${HOST}" = "master-1" -o "${HOST}" = "master-2" ]
+    then
+        printf "${FAILED}Must run on master-1 or master-2${NC}\n"
+        exit 1
+    fi
+
     echo -e "The selected option is $value, proceeding with the certificate verification of the master node"

-    ### MASTER NODES ###
-    master_hostname=$(hostname -s)
-    # CRT & KEY verification
-    check_cert_ca
+    CERT_LOCATION=$HOME
+    check_cert_and_key "ca" $SUBJ_CA $CERT_ISSUER
+    check_cert_and_key "kube-apiserver" $SUBJ_API $CERT_ISSUER
+    check_cert_and_key "kube-controller-manager" $SUBJ_KCM $CERT_ISSUER
+    check_cert_and_key "kube-scheduler" $SUBJ_KS $CERT_ISSUER
+    check_cert_and_key "service-account" $SUBJ_SA $CERT_ISSUER
+    check_cert_and_key "apiserver-kubelet-client" $SUBJ_APIKC $CERT_ISSUER
+    check_cert_and_key "etcd-server" $SUBJ_ETCD $CERT_ISSUER

-    if [ $master_hostname == "master-1" ]
+    if [ "${HOST}" = "master-1" ]
     then
-        check_cert_admin
-        check_cert_kcm
-        check_cert_kp
-        check_cert_ks
-        check_cert_adminkubeconfig
-        check_cert_kpkubeconfig
+        check_cert_and_key "admin" $SUBJ_ADMIN $CERT_ISSUER
+        check_cert_and_key "kube-proxy" $SUBJ_KP $CERT_ISSUER
     fi
-    check_cert_api
-    check_cert_sa
-    check_cert_etcd
-
-    # Kubeconfig verification
-    check_cert_kcmkubeconfig
-    check_cert_kskubeconfig
-
-    # Systemd verification
-    check_systemd_etcd
-    check_systemd_api
-    check_systemd_kcm
-    check_systemd_ks
-
-    ### END OF MASTER NODES ###
     ;;
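check_cert_and_key, the generic replacement for the per-component helpers deleted above, is defined outside this hunk; its body presumably amounts to something like the following sketch (names and messages illustrative, not the repo's actual code):

```bash
# Sketch of a generic cert/key checker, assuming $CERT_LOCATION, $SUCCESS and
# $FAILED are set as elsewhere in this script.
check_cert_and_key_sketch() {
  local name=$1 subject=$2 issuer=$3
  local cert="${CERT_LOCATION}/${name}.crt" key="${CERT_LOCATION}/${name}.key"

  [ -f "$cert" ] && [ -f "$key" ] || { printf "${FAILED}${name} cert or key missing\n"; exit 1; }

  local actual_subject actual_issuer
  actual_subject=$(openssl x509 -in "$cert" -text -noout | grep "Subject: " | tr -d " ")
  actual_issuer=$(openssl x509 -in "$cert" -text -noout | grep "Issuer: " | tr -d " ")

  if [ "$actual_subject" = "$subject" ] && [ "$actual_issuer" = "$issuer" ]
  then
    printf "${SUCCESS}${name} cert and key are correct\n"
  else
    printf "${FAILED}${name} cert has an unexpected subject or issuer\n"
    exit 1
  fi
}
```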
 2)
-    echo -e "The selected option is $value, proceeding with the certificate verification of the worker-1 node"
-
-    ### WORKER-1 NODE ###
-    check_cert_worker_1
-    check_cert_worker_1_kubeconfig
-    check_cert_worker_1_kubelet
-    check_cert_worker_1_kp
-
-    ### END OF WORKER-1 NODE ###
+    if ! [ "${HOST}" = "master-1" -o "${HOST}" = "master-2" ]
+    then
+        printf "${FAILED}Must run on master-1 or master-2${NC}\n"
+        exit 1
+    fi
+
+    check_cert_adminkubeconfig
+    check_kubeconfig_exists "kube-controller-manager" $HOME
+    check_kubeconfig_exists "kube-scheduler" $HOME
+
+    if [ "${HOST}" = "master-1" ]
+    then
+        check_kubeconfig_exists "kube-proxy" $HOME
+    fi
     ;;
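check_kubeconfig_exists is also defined outside this hunk; presumably it is little more than an existence test, along these lines:

```bash
# Sketch, reusing the script's $SUCCESS/$FAILED colour variables.
check_kubeconfig_exists_sketch() {
  local name=$1 location=$2
  if [ -f "${location}/${name}.kubeconfig" ]
  then
    printf "${SUCCESS}${name} kubeconfig found\n"
  else
    printf "${FAILED}${name} kubeconfig missing at ${location}\n"
    exit 1
  fi
}
```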
+3)
+    if ! [ "${HOST}" = "master-1" -o "${HOST}" = "master-2" ]
+    then
+        printf "${FAILED}Must run on master-1 or master-2${NC}\n"
+        exit 1
+    fi
+
+    CERT_LOCATION=/etc/etcd
+    check_cert_only "ca" $SUBJ_CA $CERT_ISSUER
+    check_cert_and_key "etcd-server" $SUBJ_ETCD $CERT_ISSUER
+
+    CERT_LOCATION=/var/lib/kubernetes/pki
+    check_cert_and_key "ca" $SUBJ_CA $CERT_ISSUER
+    check_cert_and_key "kube-apiserver" $SUBJ_API $CERT_ISSUER
+    check_cert_and_key "kube-controller-manager" $SUBJ_KCM $CERT_ISSUER
+    check_cert_and_key "kube-scheduler" $SUBJ_KS $CERT_ISSUER
+    check_cert_and_key "service-account" $SUBJ_SA $CERT_ISSUER
+    check_cert_and_key "apiserver-kubelet-client" $SUBJ_APIKC $CERT_ISSUER
+    check_cert_and_key "etcd-server" $SUBJ_ETCD $CERT_ISSUER
+
+    check_kubeconfig "kube-controller-manager" "/var/lib/kubernetes" "https://127.0.0.1:6443"
+    check_kubeconfig "kube-scheduler" "/var/lib/kubernetes" "https://127.0.0.1:6443"
+
+    check_systemd_api
+    check_systemd_etcd
+    check_systemd_kcm
+    check_systemd_ks
+    ;;
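Option 3 checks the certificates installed under /etc/etcd and /var/lib/kubernetes/pki, plus that the controller-manager and scheduler kubeconfigs target the local API server. To spot-check the latter by hand with stock kubectl:

```bash
# Print the server a kubeconfig points at; expect https://127.0.0.1:6443 here.
kubectl config view \
  --kubeconfig=/var/lib/kubernetes/kube-controller-manager.kubeconfig \
  -o jsonpath='{.clusters[0].cluster.server}'
```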
+4)
+    if ! [ "${HOST}" = "worker-1" ]
+    then
+        printf "${FAILED}Must run on worker-1${NC}\n"
+        exit 1
+    fi
+
+    CERT_LOCATION=/var/lib/kubernetes/pki
+    check_cert_only "ca" $SUBJ_CA $CERT_ISSUER
+    check_cert_and_key "kube-proxy" $SUBJ_KP $CERT_ISSUER
+    check_cert_and_key "worker-1" "Subject:CN=system:node:worker-1,O=system:nodes" $CERT_ISSUER
+    check_kubeconfig "kube-proxy" "/var/lib/kube-proxy" "https://${LOADBALANCER}:6443"
+    check_kubeconfig "kubelet" "/var/lib/kubelet" "https://${LOADBALANCER}:6443"
+    ;;
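${LOADBALANCER} is assigned elsewhere in the script. One way to derive it that matches the /etc/hosts entries written by the provisioner further down (an assumption on my part; the real assignment is not in this hunk):

```bash
# getent consults /etc/hosts via nsswitch, so this works without DNS.
LOADBALANCER=$(getent hosts loadbalancer | awk '{print $1}')
```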
+5)
+    if ! [ "${HOST}" = "worker-2" ]
+    then
+        printf "${FAILED}Must run on worker-2${NC}\n"
+        exit 1
+    fi
+
+    CERT_LOCATION=/var/lib/kubernetes/pki
+    check_cert_only "ca" $SUBJ_CA $CERT_ISSUER
+    check_cert_and_key "kube-proxy" $SUBJ_KP $CERT_ISSUER
+
+    CERT_LOCATION=/var/lib/kubelet/pki
+    check_cert_only "kubelet-client-current" "Subject:O=system:nodes,CN=system:node:worker-2" $CERT_ISSUER
+    check_kubeconfig "kube-proxy" "/var/lib/kube-proxy" "https://${LOADBALANCER}:6443"
+    ;;
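Worker-2's kubelet gets its certificate via TLS bootstrapping rather than the manual issuance used for worker-1, which is why this case looks under /var/lib/kubelet/pki for kubelet-client-current.pem. To inspect that cert directly:

```bash
# kubelet-client-current.pem symlinks to the most recently bootstrapped cert+key pair.
sudo openssl x509 -in /var/lib/kubelet/pki/kubelet-client-current.pem \
  -noout -subject -issuer
```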
 *)
-    printf "${FAILED}Exiting... Please select either option 1 or 2\n"
+    printf "${FAILED}Exiting... Please select a valid option between 1 and 5\n${NC}"
     exit 1
     ;;
 esac
@@ -1,3 +0,0 @@
-cd /tmp
-curl -fsSL https://get.docker.com -o get-docker.sh
-sh /tmp/get-docker.sh
@@ -1,15 +0,0 @@
-#!/bin/bash
-export DEBIAN_FRONTEND=noninteractive
-apt-get update \
-  && apt-get install -y \
-    apt-transport-https \
-    ca-certificates \
-    curl \
-    software-properties-common \
-  && curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - \
-  && add-apt-repository \
-    "deb https://download.docker.com/linux/$(. /etc/os-release; echo "$ID") \
-    $(lsb_release -cs) \
-    stable" \
-  && apt-get update \
-  && apt-get install -y docker-ce=$(apt-cache madison docker-ce | grep 18.06 | head -1 | awk '{print $3}')
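Both Docker provisioning scripts are deleted because 1.24 removes dockershim; per the PR description the lab installs containerd instead. The actual replacement step lives in another file in this PR, but on Ubuntu 22.04 its minimal form would be roughly:

```bash
# Sketch only; the PR's real containerd provisioning is elsewhere.
apt-get update && apt-get install -y containerd
```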
@@ -0,0 +1,19 @@
+#!/bin/bash
+#
+# Sets up the kernel with the requirements for running Kubernetes.
+# Requires a reboot, which is carried out by the vagrant provisioner.
+set -ex
+
+# Disable cgroups v2 (kernel command line parameter)
+sed -i 's/GRUB_CMDLINE_LINUX_DEFAULT="/GRUB_CMDLINE_LINUX_DEFAULT="systemd.unified_cgroup_hierarchy=0 ipv6.disable=1 /' /etc/default/grub
+update-grub
+
+# Add br_netfilter kernel module
+echo "br_netfilter" >> /etc/modules
+
+# Set network tunables
+cat <<EOF >> /etc/sysctl.d/10-kubernetes.conf
+net.bridge.bridge-nf-call-iptables=1
+net.ipv4.ip_forward=1
+EOF
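After the reboot the provisioner triggers, the settings can be confirmed with stock tooling:

```bash
lsmod | grep br_netfilter                   # module loaded from /etc/modules
sysctl net.bridge.bridge-nf-call-iptables   # should print ... = 1
sysctl net.ipv4.ip_forward                  # should print ... = 1
stat -fc %T /sys/fs/cgroup                  # tmpfs => cgroups v1, cgroup2fs => v2
```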
@@ -0,0 +1,3 @@
+set -g default-shell /bin/bash
+set -g mouse on
+bind -n C-x setw synchronize-panes
@@ -1,5 +1,6 @@
 #!/bin/bash
 
+# Point to Google's DNS server
 sed -i -e 's/#DNS=/DNS=8.8.8.8/' /etc/systemd/resolved.conf
 
 service systemd-resolved restart
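On Ubuntu 22.04 the change can be verified with resolvectl, which ships with systemd:

```bash
resolvectl status | grep "DNS Servers"   # should now list 8.8.8.8
```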
@@ -1,17 +1,22 @@
 #!/bin/bash
-set -e
+#
+# Set up /etc/hosts so we can resolve all the machines in the VirtualBox network
+set -ex
 IFNAME=$1
+THISHOST=$2
 ADDRESS="$(ip -4 addr show $IFNAME | grep "inet" | head -1 | awk '{print $2}' | cut -d/ -f1)"
+NETWORK=$(echo $ADDRESS | awk 'BEGIN {FS="."} ; { printf("%s.%s.%s", $1, $2, $3) }')
 sed -e "s/^.*${HOSTNAME}.*/${ADDRESS} ${HOSTNAME} ${HOSTNAME}.local/" -i /etc/hosts
 
-# remove ubuntu-bionic entry
-sed -e '/^.*ubuntu-bionic.*/d' -i /etc/hosts
+# remove ubuntu-jammy entry
+sed -e '/^.*ubuntu-jammy.*/d' -i /etc/hosts
+sed -e "/^.*$2.*/d" -i /etc/hosts
 
 # Update /etc/hosts about other hosts
 cat >> /etc/hosts <<EOF
-192.168.5.11 master-1
-192.168.5.12 master-2
-192.168.5.21 worker-1
-192.168.5.22 worker-2
-192.168.5.30 lb
+${NETWORK}.11 master-1
+${NETWORK}.12 master-2
+${NETWORK}.21 worker-1
+${NETWORK}.22 worker-2
+${NETWORK}.30 loadbalancer
EOF
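So on a node whose $IFNAME address is, say, 192.168.5.21, NETWORK evaluates to 192.168.5 and every lab machine resolves by name. A quick post-provision check:

```bash
# getent reads /etc/hosts, so this confirms the generated entries.
getent hosts master-1 master-2 worker-1 worker-2 loadbalancer
```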
@@ -0,0 +1,6 @@
+set nu
+set ts=2
+set sw=2
+set et
+set ai
+set pastetoggle=<F3>