kubernetes-the-hard-way-on-vagrant
parent
bf2850974e
commit
22ae1a2ffc
|
@ -47,3 +47,7 @@ service-account-key.pem
|
|||
service-account.csr
|
||||
service-account.pem
|
||||
service-account-csr.json
|
||||
.idea
|
||||
ubuntu-bionic*.log
|
||||
.vagrant
|
||||
temp
|
|
@ -0,0 +1,78 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<project version="4">
|
||||
<component name="MarkdownProjectSettings" wasCopied="true">
|
||||
<PreviewSettings splitEditorLayout="SPLIT" splitEditorPreview="PREVIEW" useGrayscaleRendering="false" zoomFactor="1.0" maxImageWidth="0" showGitHubPageIfSynced="false" allowBrowsingInPreview="false" synchronizePreviewPosition="true" highlightPreviewType="NONE" highlightFadeOut="5" highlightOnTyping="true" synchronizeSourcePosition="true" verticallyAlignSourceAndPreviewSyncPosition="true" showSearchHighlightsInPreview="false" showSelectionInPreview="true" openRemoteLinks="true" replaceUnicodeEmoji="false" lastLayoutSetsDefault="false">
|
||||
<PanelProvider>
|
||||
<provider providerId="com.vladsch.idea.multimarkdown.editor.swing.html.panel" providerName="Default - Swing" />
|
||||
</PanelProvider>
|
||||
</PreviewSettings>
|
||||
<ParserSettings gitHubSyntaxChange="false" emojiShortcuts="1" emojiImages="0">
|
||||
<PegdownExtensions>
|
||||
<option name="ABBREVIATIONS" value="false" />
|
||||
<option name="ANCHORLINKS" value="true" />
|
||||
<option name="ASIDE" value="false" />
|
||||
<option name="ATXHEADERSPACE" value="true" />
|
||||
<option name="AUTOLINKS" value="true" />
|
||||
<option name="DEFINITIONS" value="false" />
|
||||
<option name="DEFINITION_BREAK_DOUBLE_BLANK_LINE" value="false" />
|
||||
<option name="FENCED_CODE_BLOCKS" value="true" />
|
||||
<option name="FOOTNOTES" value="false" />
|
||||
<option name="HARDWRAPS" value="false" />
|
||||
<option name="HTML_DEEP_PARSER" value="false" />
|
||||
<option name="INSERTED" value="false" />
|
||||
<option name="QUOTES" value="false" />
|
||||
<option name="RELAXEDHRULES" value="true" />
|
||||
<option name="SMARTS" value="false" />
|
||||
<option name="STRIKETHROUGH" value="true" />
|
||||
<option name="SUBSCRIPT" value="false" />
|
||||
<option name="SUPERSCRIPT" value="false" />
|
||||
<option name="SUPPRESS_HTML_BLOCKS" value="false" />
|
||||
<option name="SUPPRESS_INLINE_HTML" value="false" />
|
||||
<option name="TABLES" value="true" />
|
||||
<option name="TASKLISTITEMS" value="true" />
|
||||
<option name="TOC" value="false" />
|
||||
<option name="WIKILINKS" value="true" />
|
||||
</PegdownExtensions>
|
||||
<ParserOptions>
|
||||
<option name="ADMONITION_EXT" value="false" />
|
||||
<option name="ATTRIBUTES_EXT" value="false" />
|
||||
<option name="COMMONMARK_LISTS" value="true" />
|
||||
<option name="DUMMY" value="false" />
|
||||
<option name="EMOJI_SHORTCUTS" value="true" />
|
||||
<option name="ENUMERATED_REFERENCES_EXT" value="false" />
|
||||
<option name="FLEXMARK_FRONT_MATTER" value="false" />
|
||||
<option name="GFM_LOOSE_BLANK_LINE_AFTER_ITEM_PARA" value="false" />
|
||||
<option name="GFM_TABLE_RENDERING" value="true" />
|
||||
<option name="GITBOOK_URL_ENCODING" value="false" />
|
||||
<option name="GITHUB_LISTS" value="false" />
|
||||
<option name="GITHUB_WIKI_LINKS" value="true" />
|
||||
<option name="HEADER_ID_NO_DUPED_DASHES" value="false" />
|
||||
<option name="JEKYLL_FRONT_MATTER" value="false" />
|
||||
<option name="NO_TEXT_ATTRIBUTES" value="false" />
|
||||
<option name="PARSE_HTML_ANCHOR_ID" value="false" />
|
||||
<option name="SIM_TOC_BLANK_LINE_SPACER" value="true" />
|
||||
</ParserOptions>
|
||||
</ParserSettings>
|
||||
<HtmlSettings headerTopEnabled="false" headerBottomEnabled="false" bodyTopEnabled="false" bodyBottomEnabled="false" embedUrlContent="false" addPageHeader="true" embedImages="false" embedHttpImages="false" imageUriSerials="false">
|
||||
<GeneratorProvider>
|
||||
<provider providerId="com.vladsch.idea.multimarkdown.editor.swing.html.generator" providerName="Default Swing HTML Generator" />
|
||||
</GeneratorProvider>
|
||||
<headerTop />
|
||||
<headerBottom />
|
||||
<bodyTop />
|
||||
<bodyBottom />
|
||||
</HtmlSettings>
|
||||
<CssSettings previewScheme="UI_SCHEME" cssUri="" isCssUriEnabled="false" isCssUriSerial="false" isCssTextEnabled="false" isDynamicPageWidth="true">
|
||||
<StylesheetProvider>
|
||||
<provider providerId="com.vladsch.idea.multimarkdown.editor.swing.html.css" providerName="Default Swing Stylesheet" />
|
||||
</StylesheetProvider>
|
||||
<ScriptProviders />
|
||||
<cssText />
|
||||
<cssUriHistory />
|
||||
</CssSettings>
|
||||
<HtmlExportSettings updateOnSave="false" parentDir="" targetDir="" cssDir="" scriptDir="" plainHtml="false" imageDir="" copyLinkedImages="false" imageUniquifyType="0" targetExt="" useTargetExt="false" noCssNoScripts="false" linkToExportedHtml="true" exportOnSettingsChange="true" regenerateOnProjectOpen="false" linkFormatType="HTTP_ABSOLUTE" />
|
||||
<LinkMapSettings>
|
||||
<textMaps />
|
||||
</LinkMapSettings>
|
||||
</component>
|
||||
</project>
|
32
README.md
32
README.md
|
@ -1,9 +1,16 @@
|
|||
# Kubernetes The Hard Way
|
||||
|
||||
This tutorial walks you through setting up Kubernetes the hard way. This guide is not for people looking for a fully automated command to bring up a Kubernetes cluster. If that's you then check out [Google Kubernetes Engine](https://cloud.google.com/kubernetes-engine), or the [Getting Started Guides](http://kubernetes.io/docs/getting-started-guides/).
|
||||
This tutorial walks you through setting up Kubernetes the hard way on a local machine using VirtualBox.
|
||||
This guide is not for people looking for a fully automated command to bring up a Kubernetes cluster.
|
||||
If that's you then check out [Google Kubernetes Engine](https://cloud.google.com/kubernetes-engine), or the [Getting Started Guides](http://kubernetes.io/docs/getting-started-guides/).
|
||||
|
||||
Kubernetes The Hard Way is optimized for learning, which means taking the long route to ensure you understand each task required to bootstrap a Kubernetes cluster.
|
||||
|
||||
This tutorial is a modified version of the original developed by [Kelsey Hightower](https://github.com/kelseyhightower/kubernetes-the-hard-way).
|
||||
While the original uses GCP as the platform to deploy Kubernetes, we use VirtualBox and Vagrant to deploy a cluster on a local machine. If you prefer the cloud version, refer to the original [here](https://github.com/kelseyhightower/kubernetes-the-hard-way).
|
||||
|
||||
Another difference is that we use Docker instead of containerd. There are a few other differences from the original, and they are documented [here](differences-to-original.md).
|
||||
|
||||
> The results of this tutorial should not be viewed as production ready, and may receive limited support from the community, but don't let that stop you from learning!
|
||||
|
||||
## Target Audience
|
||||
|
@ -14,17 +21,15 @@ The target audience for this tutorial is someone planning to support a productio
|
|||
|
||||
Kubernetes The Hard Way guides you through bootstrapping a highly available Kubernetes cluster with end-to-end encryption between components and RBAC authentication.
|
||||
|
||||
* [Kubernetes](https://github.com/kubernetes/kubernetes) 1.12.0
|
||||
* [containerd Container Runtime](https://github.com/containerd/containerd) 1.2.0-rc.0
|
||||
* [gVisor](https://github.com/google/gvisor) 50c283b9f56bb7200938d9e207355f05f79f0d17
|
||||
* [CNI Container Networking](https://github.com/containernetworking/cni) 0.6.0
|
||||
* [Kubernetes](https://github.com/kubernetes/kubernetes) 1.13.0
|
||||
* [Docker Container Runtime](https://www.docker.com/) 18.06
|
||||
* [CNI Container Networking](https://github.com/containernetworking/cni) 0.7.5
|
||||
* [Weave Networking](https://www.weave.works/docs/net/latest/kubernetes/kube-addon/)
|
||||
* [etcd](https://github.com/coreos/etcd) v3.3.9
|
||||
* [CoreDNS](https://github.com/coredns/coredns) v1.2.2
|
||||
|
||||
## Labs
|
||||
|
||||
This tutorial assumes you have access to the [Google Cloud Platform](https://cloud.google.com). While GCP is used for basic infrastructure requirements the lessons learned in this tutorial can be applied to other platforms.
|
||||
|
||||
* [Prerequisites](docs/01-prerequisites.md)
|
||||
* [Installing the Client Tools](docs/02-client-tools.md)
|
||||
* [Provisioning Compute Resources](docs/03-compute-resources.md)
|
||||
|
@ -34,8 +39,11 @@ This tutorial assumes you have access to the [Google Cloud Platform](https://clo
|
|||
* [Bootstrapping the etcd Cluster](docs/07-bootstrapping-etcd.md)
|
||||
* [Bootstrapping the Kubernetes Control Plane](docs/08-bootstrapping-kubernetes-controllers.md)
|
||||
* [Bootstrapping the Kubernetes Worker Nodes](docs/09-bootstrapping-kubernetes-workers.md)
|
||||
* [Configuring kubectl for Remote Access](docs/10-configuring-kubectl.md)
|
||||
* [Provisioning Pod Network Routes](docs/11-pod-network-routes.md)
|
||||
* [Deploying the DNS Cluster Add-on](docs/12-dns-addon.md)
|
||||
* [Smoke Test](docs/13-smoke-test.md)
|
||||
* [Cleaning Up](docs/14-cleanup.md)
|
||||
* [TLS Bootstrapping the Kubernetes Worker Nodes](docs/10-tls-bootstrapping-kubernetes-workers.md)
|
||||
* [Configuring kubectl for Remote Access](docs/11-configuring-kubectl.md)
|
||||
* [Deploy Weave - Pod Networking Solution](docs/12-configure-pod-networking.md)
|
||||
* [Deploying the DNS Cluster Add-on](docs/13-dns-addon.md)
|
||||
* [Kube API Server to Kubelet Configuration](docs/14-kube-apiserver-to-kubelet.md)
|
||||
* [Smoke Test](docs/15-smoke-test.md)
|
||||
* [E2E Test](docs/16-e2e-tests.md)
|
||||
* [Extra - Dynamic Kubelet Configuration](docs/17-extra-dynamic-kubelet-configuration.md)
|
||||
|
|
|
@ -25,7 +25,7 @@ metadata:
|
|||
spec:
|
||||
selector:
|
||||
k8s-app: kube-dns
|
||||
clusterIP: 10.32.0.10
|
||||
clusterIP: 10.96.0.10
|
||||
ports:
|
||||
- name: dns
|
||||
port: 53
|
||||
|
|
|
@ -1,57 +1,29 @@
|
|||
# Prerequisites
|
||||
|
||||
## Google Cloud Platform
|
||||
## VM Hardware Requirements
|
||||
|
||||
This tutorial leverages the [Google Cloud Platform](https://cloud.google.com/) to streamline provisioning of the compute infrastructure required to bootstrap a Kubernetes cluster from the ground up. [Sign up](https://cloud.google.com/free/) for $300 in free credits.
|
||||
8 GB of RAM (preferably 16 GB)
|
||||
50 GB Disk space
|
||||
|
||||
[Estimated cost](https://cloud.google.com/products/calculator/#id=78df6ced-9c50-48f8-a670-bc5003f2ddaa) to run this tutorial: $0.22 per hour ($5.39 per day).
|
||||
## Virtual Box
|
||||
|
||||
> The compute resources required for this tutorial exceed the Google Cloud Platform free tier.
|
||||
Download and install [VirtualBox](https://www.virtualbox.org/wiki/Downloads) on any one of the supported platforms:
|
||||
|
||||
## Google Cloud Platform SDK
|
||||
- Windows hosts
|
||||
- OS X hosts
|
||||
- Linux distributions
|
||||
- Solaris hosts
|
||||
|
||||
### Install the Google Cloud SDK
|
||||
## Vagrant
|
||||
|
||||
Follow the Google Cloud SDK [documentation](https://cloud.google.com/sdk/) to install and configure the `gcloud` command line utility.
|
||||
Once VirtualBox is installed, you may choose to deploy virtual machines on it manually.
|
||||
Vagrant provides an easier and more consistent way to deploy multiple virtual machines on VirtualBox.
|
||||
|
||||
Verify the Google Cloud SDK version is 218.0.0 or higher:
|
||||
Download and install [Vagrant](https://www.vagrantup.com/) on your platform.
|
||||
|
||||
```
|
||||
gcloud version
|
||||
```
|
||||
|
||||
### Set a Default Compute Region and Zone
|
||||
|
||||
This tutorial assumes a default compute region and zone have been configured.
|
||||
|
||||
If you are using the `gcloud` command-line tool for the first time `init` is the easiest way to do this:
|
||||
|
||||
```
|
||||
gcloud init
|
||||
```
|
||||
|
||||
Otherwise set a default compute region:
|
||||
|
||||
```
|
||||
gcloud config set compute/region us-west1
|
||||
```
|
||||
|
||||
Set a default compute zone:
|
||||
|
||||
```
|
||||
gcloud config set compute/zone us-west1-c
|
||||
```
|
||||
|
||||
> Use the `gcloud compute zones list` command to view additional regions and zones.
|
||||
|
||||
## Running Commands in Parallel with tmux
|
||||
|
||||
[tmux](https://github.com/tmux/tmux/wiki) can be used to run commands on multiple compute instances at the same time. Labs in this tutorial may require running the same commands across multiple compute instances, in those cases consider using tmux and splitting a window into multiple panes with `synchronize-panes` enabled to speed up the provisioning process.
|
||||
|
||||
> The use of tmux is optional and not required to complete this tutorial.
|
||||
|
||||

|
||||
|
||||
> Enable `synchronize-panes`: `ctrl+b` then `shift :`. Then type `set synchronize-panes on` at the prompt. To disable synchronization: `set synchronize-panes off`.
|
||||
|
||||
Next: [Installing the Client Tools](02-client-tools.md)
|
||||
- Windows
|
||||
- Debian
|
||||
- CentOS
|
||||
- Linux
|
||||
- macOS
|
||||
- Arch Linux
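Once both tools are installed, a quick sanity check confirms they are on your `PATH` (a minimal sketch; the exact version numbers will vary):

```
vagrant --version
VBoxManage --version
```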
|
|
@ -1,95 +1,15 @@
|
|||
# Installing the Client Tools
|
||||
|
||||
In this lab you will install the command line utilities required to complete this tutorial: [cfssl](https://github.com/cloudflare/cfssl), [cfssljson](https://github.com/cloudflare/cfssl), and [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl).
|
||||
|
||||
|
||||
## Install CFSSL
|
||||
|
||||
The `cfssl` and `cfssljson` command line utilities will be used to provision a [PKI Infrastructure](https://en.wikipedia.org/wiki/Public_key_infrastructure) and generate TLS certificates.
|
||||
|
||||
Download and install `cfssl` and `cfssljson` from the [cfssl repository](https://pkg.cfssl.org):
|
||||
|
||||
### OS X
|
||||
|
||||
```
|
||||
curl -o cfssl https://pkg.cfssl.org/R1.2/cfssl_darwin-amd64
|
||||
curl -o cfssljson https://pkg.cfssl.org/R1.2/cfssljson_darwin-amd64
|
||||
```
|
||||
|
||||
```
|
||||
chmod +x cfssl cfssljson
|
||||
```
|
||||
|
||||
```
|
||||
sudo mv cfssl cfssljson /usr/local/bin/
|
||||
```
|
||||
|
||||
Some OS X users may experience problems using the pre-built binaries in which case [Homebrew](https://brew.sh) might be a better option:
|
||||
|
||||
```
|
||||
brew install cfssl
|
||||
```
|
||||
|
||||
### Linux
|
||||
|
||||
```
|
||||
wget -q --show-progress --https-only --timestamping \
|
||||
https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 \
|
||||
https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
|
||||
```
|
||||
|
||||
```
|
||||
chmod +x cfssl_linux-amd64 cfssljson_linux-amd64
|
||||
```
|
||||
|
||||
```
|
||||
sudo mv cfssl_linux-amd64 /usr/local/bin/cfssl
|
||||
```
|
||||
|
||||
```
|
||||
sudo mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
|
||||
```
|
||||
|
||||
### Verification
|
||||
|
||||
Verify `cfssl` version 1.2.0 or higher is installed:
|
||||
|
||||
```
|
||||
cfssl version
|
||||
```
|
||||
|
||||
> output
|
||||
|
||||
```
|
||||
Version: 1.2.0
|
||||
Revision: dev
|
||||
Runtime: go1.6
|
||||
```
|
||||
|
||||
> The cfssljson command line utility does not provide a way to print its version.
|
||||
In this lab you will install the command line utility required to complete this tutorial: [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl).
|
||||
|
||||
## Install kubectl
|
||||
|
||||
The `kubectl` command line utility is used to interact with the Kubernetes API Server. Download and install `kubectl` from the official release binaries:
|
||||
|
||||
### OS X
|
||||
|
||||
```
|
||||
curl -o kubectl https://storage.googleapis.com/kubernetes-release/release/v1.12.0/bin/darwin/amd64/kubectl
|
||||
```
|
||||
|
||||
```
|
||||
chmod +x kubectl
|
||||
```
|
||||
|
||||
```
|
||||
sudo mv kubectl /usr/local/bin/
|
||||
```
|
||||
|
||||
### Linux
|
||||
|
||||
```
|
||||
wget https://storage.googleapis.com/kubernetes-release/release/v1.12.0/bin/linux/amd64/kubectl
|
||||
wget https://storage.googleapis.com/kubernetes-release/release/v1.13.0/bin/linux/amd64/kubectl
|
||||
```
|
||||
|
||||
```
|
||||
|
@ -102,7 +22,7 @@ sudo mv kubectl /usr/local/bin/
|
|||
|
||||
### Verification
|
||||
|
||||
Verify `kubectl` version 1.12.0 or higher is installed:
|
||||
Verify `kubectl` version 1.13.0 or higher is installed:
|
||||
|
||||
```
|
||||
kubectl version --client
|
||||
|
@ -111,7 +31,7 @@ kubectl version --client
|
|||
> output
|
||||
|
||||
```
|
||||
Client Version: version.Info{Major:"1", Minor:"12", GitVersion:"v1.12.0", GitCommit:"0ed33881dc4355495f623c6f22e7dd0b7632b7c0", GitTreeState:"clean", BuildDate:"2018-09-27T17:05:32Z", GoVersion:"go1.10.4", Compiler:"gc", Platform:"linux/amd64"}
|
||||
Client Version: version.Info{Major:"1", Minor:"13", GitVersion:"v1.13.0", GitCommit:"ddf47ac13c1a9483ea035a79cd7c10005ff21a6d", GitTreeState:"clean", BuildDate:"2018-12-03T21:04:45Z", GoVersion:"go1.11.2", Compiler:"gc", Platform:"linux/amd64"}
|
||||
```
|
||||
|
||||
Next: [Provisioning Compute Resources](03-compute-resources.md)
|
||||
|
|
|
@ -1,230 +1,99 @@
|
|||
# Provisioning Compute Resources
|
||||
|
||||
Kubernetes requires a set of machines to host the Kubernetes control plane and the worker nodes where containers are ultimately run. In this lab you will provision the compute resources required for running a secure and highly available Kubernetes cluster across a single [compute zone](https://cloud.google.com/compute/docs/regions-zones/regions-zones).
|
||||
Note: You must have VirtualBox and Vagrant configured at this point.
|
||||
|
||||
> Ensure a default compute zone and region have been set as described in the [Prerequisites](01-prerequisites.md#set-a-default-compute-region-and-zone) lab.
|
||||
Clone this GitHub repository and cd into the vagrant folder.
|
||||
|
||||
## Networking
|
||||
`git clone https://github.com/mmumshad/kubernetes-the-hard-way.git`
|
||||
|
||||
The Kubernetes [networking model](https://kubernetes.io/docs/concepts/cluster-administration/networking/#kubernetes-model) assumes a flat network in which containers and nodes can communicate with each other. In cases where this is not desired [network policies](https://kubernetes.io/docs/concepts/services-networking/network-policies/) can limit how groups of containers are allowed to communicate with each other and external network endpoints.
|
||||
cd into the vagrant directory
|
||||
|
||||
> Setting up network policies is out of scope for this tutorial.
|
||||
`cd kubernetes-the-hard-way/vagrant`
|
||||
|
||||
### Virtual Private Cloud Network
|
||||
Run `vagrant up`
|
||||
|
||||
In this section a dedicated [Virtual Private Cloud](https://cloud.google.com/compute/docs/networks-and-firewalls#networks) (VPC) network will be setup to host the Kubernetes cluster.
|
||||
`vagrant up`
|
||||
|
||||
Create the `kubernetes-the-hard-way` custom VPC network:
|
||||
|
||||
```
|
||||
gcloud compute networks create kubernetes-the-hard-way --subnet-mode custom
|
||||
```
|
||||
This does the following:
|
||||
|
||||
A [subnet](https://cloud.google.com/compute/docs/vpc/#vpc_networks_and_subnets) must be provisioned with an IP address range large enough to assign a private IP address to each node in the Kubernetes cluster.
|
||||
- Deploys 5 VMs: 2 masters, 2 workers, and 1 load balancer, named `kubernetes-ha-*`
|
||||
> These are the default settings. They can be changed at the top of the Vagrantfile.
|
||||
|
||||
Create the `kubernetes` subnet in the `kubernetes-the-hard-way` VPC network:
|
||||
- Sets IP addresses in the range 192.168.5.x
|
||||
|
||||
```
|
||||
gcloud compute networks subnets create kubernetes \
|
||||
--network kubernetes-the-hard-way \
|
||||
--range 10.240.0.0/24
|
||||
```
|
||||
| VM | VM Name | Purpose | IP | Forwarded Port |
|
||||
| ------------ | ---------------------- |:-------------:| ------------:| ----------------:|
|
||||
| master-1 | kubernetes-ha-master-1 | Master | 192.168.5.11 | 2711 |
|
||||
| master-2 | kubernetes-ha-master-2 | Master | 192.168.5.12 | 2712 |
|
||||
| worker-1 | kubernetes-ha-worker-1 | Worker | 192.168.5.21 | 2730 |
|
||||
| worker-2 | kubernetes-ha-worker-2 | Worker | 192.168.5.22 | 2721 |
|
||||
| loadbalancer | kubernetes-ha-lb | LoadBalancer | 192.168.5.30 | 2722 |
|
||||
|
||||
> The `10.240.0.0/24` IP address range can host up to 254 compute instances.
|
||||
> These are the default settings. They can be changed in the Vagrantfile.
|
||||
|
||||
### Firewall Rules
|
||||
- Adds a DNS entry to each of the nodes to access the internet
|
||||
> DNS: 8.8.8.8
|
||||
|
||||
Create a firewall rule that allows internal communication across all protocols:
|
||||
- Installs Docker on the master and worker nodes
|
||||
- Runs the command below on all nodes to allow network forwarding in iptables.
|
||||
This is required for Kubernetes networking to function correctly.
|
||||
> sysctl net.bridge.bridge-nf-call-iptables=1
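To confirm the setting took effect on a node, query it directly (a quick check; it should report a value of 1):

```
sysctl net.bridge.bridge-nf-call-iptables
```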
|
||||
|
||||
```
|
||||
gcloud compute firewall-rules create kubernetes-the-hard-way-allow-internal \
|
||||
--allow tcp,udp,icmp \
|
||||
--network kubernetes-the-hard-way \
|
||||
--source-ranges 10.240.0.0/24,10.200.0.0/16
|
||||
```
|
||||
|
||||
Create a firewall rule that allows external SSH, ICMP, and HTTPS:
|
||||
## SSH to the nodes
|
||||
|
||||
```
|
||||
gcloud compute firewall-rules create kubernetes-the-hard-way-allow-external \
|
||||
--allow tcp:22,tcp:6443,icmp \
|
||||
--network kubernetes-the-hard-way \
|
||||
--source-ranges 0.0.0.0/0
|
||||
```
|
||||
There are two ways to SSH into the nodes:
|
||||
|
||||
> An [external load balancer](https://cloud.google.com/compute/docs/load-balancing/network/) will be used to expose the Kubernetes API Servers to remote clients.
|
||||
### 1. SSH using Vagrant
|
||||
|
||||
List the firewall rules in the `kubernetes-the-hard-way` VPC network:
|
||||
From the directory where you ran the `vagrant up` command, run `vagrant ssh <vm>`, for example `vagrant ssh master-1`.
|
||||
> Note: Use the VM field from the table above, not the VM Name field.
|
||||
|
||||
```
|
||||
gcloud compute firewall-rules list --filter="network:kubernetes-the-hard-way"
|
||||
```
|
||||
### 2. SSH Using SSH Client Tools
|
||||
|
||||
> output
|
||||
Use your favourite SSH terminal tool (e.g., PuTTY).
|
||||
|
||||
```
|
||||
NAME NETWORK DIRECTION PRIORITY ALLOW DENY
|
||||
kubernetes-the-hard-way-allow-external kubernetes-the-hard-way INGRESS 1000 tcp:22,tcp:6443,icmp
|
||||
kubernetes-the-hard-way-allow-internal kubernetes-the-hard-way INGRESS 1000 tcp,udp,icmp
|
||||
```
|
||||
Use the IP addresses above. Username/password-based SSH is disabled by default.
|
||||
Vagrant generates a private key for each of these VMs. It is placed under the `.vagrant` folder (in the directory you ran the `vagrant up` command from) at the path below for each VM:
|
||||
|
||||
### Kubernetes Public IP Address
|
||||
**Private Key Path:** `.vagrant/machines/<machine name>/virtualbox/private_key`
|
||||
|
||||
Allocate a static IP address that will be attached to the external load balancer fronting the Kubernetes API Servers:
|
||||
**Username:** `vagrant`
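For example, connecting to master-1 from the directory where you ran `vagrant up` might look like the following (a sketch assuming the default IP from the table above):

```
ssh -i .vagrant/machines/master-1/virtualbox/private_key vagrant@192.168.5.11
```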
|
||||
|
||||
```
|
||||
gcloud compute addresses create kubernetes-the-hard-way \
|
||||
--region $(gcloud config get-value compute/region)
|
||||
```
|
||||
|
||||
Verify the `kubernetes-the-hard-way` static IP address was created in your default compute region:
|
||||
## Verify Environment
|
||||
|
||||
```
|
||||
gcloud compute addresses list --filter="name=('kubernetes-the-hard-way')"
|
||||
```
|
||||
- Ensure all VMs are up
|
||||
- Ensure VMs are assigned the above IP addresses
|
||||
- Ensure you can SSH into these VMs using the IP and private keys
|
||||
- Ensure the VMs can ping each other
|
||||
- Ensure the master and worker nodes have Docker installed on them. Version: 18.06
|
||||
> command `sudo docker version`
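A quick way to confirm all VMs are up is `vagrant status`, run from the directory containing the Vagrantfile (a sketch; the exact output format depends on your Vagrant version):

```
vagrant status
# Expect all five machines (master-1, master-2, worker-1, worker-2, loadbalancer) in the "running" state
```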
|
||||
|
||||
> output
|
||||
## Troubleshooting Tips
|
||||
|
||||
```
|
||||
NAME REGION ADDRESS STATUS
|
||||
kubernetes-the-hard-way us-west1 XX.XXX.XXX.XX RESERVED
|
||||
```
|
||||
If any of the VMs failed to provision, or is not configured correctly, delete the VM using the command:
|
||||
|
||||
## Compute Instances
|
||||
`vagrant destroy <vm>`
|
||||
|
||||
The compute instances in this lab will be provisioned using [Ubuntu Server](https://www.ubuntu.com/server) 18.04, which has good support for the [containerd container runtime](https://github.com/containerd/containerd). Each compute instance will be provisioned with a fixed private IP address to simplify the Kubernetes bootstrapping process.
|
||||
Then re-provision. Only the missing VMs will be re-provisioned:
|
||||
|
||||
### Kubernetes Controllers
|
||||
`vagrant up`
|
||||
|
||||
Create three compute instances which will host the Kubernetes control plane:
|
||||
|
||||
```
|
||||
for i in 0 1 2; do
|
||||
gcloud compute instances create controller-${i} \
|
||||
--async \
|
||||
--boot-disk-size 200GB \
|
||||
--can-ip-forward \
|
||||
--image-family ubuntu-1804-lts \
|
||||
--image-project ubuntu-os-cloud \
|
||||
--machine-type n1-standard-1 \
|
||||
--private-network-ip 10.240.0.1${i} \
|
||||
--scopes compute-rw,storage-ro,service-management,service-control,logging-write,monitoring \
|
||||
--subnet kubernetes \
|
||||
--tags kubernetes-the-hard-way,controller
|
||||
done
|
||||
```
|
||||
Sometimes the delete does not remove the folder created for the VM and throws the error below.
|
||||
|
||||
### Kubernetes Workers
|
||||
VirtualBox error:
|
||||
|
||||
Each worker instance requires a pod subnet allocation from the Kubernetes cluster CIDR range. The pod subnet allocation will be used to configure container networking in a later exercise. The `pod-cidr` instance metadata will be used to expose pod subnet allocations to compute instances at runtime.
|
||||
VBoxManage.exe: error: Could not rename the directory 'D:\VirtualBox VMs\ubuntu-bionic-18.04-cloudimg-20190122_1552891552601_76806' to 'D:\VirtualBox VMs\kubernetes-ha-worker-2' to save the settings file (VERR_ALREADY_EXISTS)
|
||||
VBoxManage.exe: error: Details: code E_FAIL (0x80004005), component SessionMachine, interface IMachine, callee IUnknown
|
||||
VBoxManage.exe: error: Context: "SaveSettings()" at line 3105 of file VBoxManageModifyVM.cpp
|
||||
|
||||
> The Kubernetes cluster CIDR range is defined by the Controller Manager's `--cluster-cidr` flag. In this tutorial the cluster CIDR range will be set to `10.200.0.0/16`, which supports 254 subnets.
|
||||
In such cases, delete the VM, then delete the VM folder, and then re-provision:
|
||||
|
||||
Create three compute instances which will host the Kubernetes worker nodes:
|
||||
`vagrant destroy <vm>`
|
||||
|
||||
```
|
||||
for i in 0 1 2; do
|
||||
gcloud compute instances create worker-${i} \
|
||||
--async \
|
||||
--boot-disk-size 200GB \
|
||||
--can-ip-forward \
|
||||
--image-family ubuntu-1804-lts \
|
||||
--image-project ubuntu-os-cloud \
|
||||
--machine-type n1-standard-1 \
|
||||
--metadata pod-cidr=10.200.${i}.0/24 \
|
||||
--private-network-ip 10.240.0.2${i} \
|
||||
--scopes compute-rw,storage-ro,service-management,service-control,logging-write,monitoring \
|
||||
--subnet kubernetes \
|
||||
--tags kubernetes-the-hard-way,worker
|
||||
done
|
||||
```
|
||||
`rmdir "<path-to-vm-folder>\kubernetes-ha-worker-2"`
|
||||
|
||||
### Verification
|
||||
|
||||
List the compute instances in your default compute zone:
|
||||
|
||||
```
|
||||
gcloud compute instances list
|
||||
```
|
||||
|
||||
> output
|
||||
|
||||
```
|
||||
NAME ZONE MACHINE_TYPE PREEMPTIBLE INTERNAL_IP EXTERNAL_IP STATUS
|
||||
controller-0 us-west1-c n1-standard-1 10.240.0.10 XX.XXX.XXX.XXX RUNNING
|
||||
controller-1 us-west1-c n1-standard-1 10.240.0.11 XX.XXX.X.XX RUNNING
|
||||
controller-2 us-west1-c n1-standard-1 10.240.0.12 XX.XXX.XXX.XX RUNNING
|
||||
worker-0 us-west1-c n1-standard-1 10.240.0.20 XXX.XXX.XXX.XX RUNNING
|
||||
worker-1 us-west1-c n1-standard-1 10.240.0.21 XX.XXX.XX.XXX RUNNING
|
||||
worker-2 us-west1-c n1-standard-1 10.240.0.22 XXX.XXX.XX.XX RUNNING
|
||||
```
|
||||
|
||||
## Configuring SSH Access
|
||||
|
||||
SSH will be used to configure the controller and worker instances. When connecting to compute instances for the first time SSH keys will be generated for you and stored in the project or instance metadata as describe in the [connecting to instances](https://cloud.google.com/compute/docs/instances/connecting-to-instance) documentation.
|
||||
|
||||
Test SSH access to the `controller-0` compute instances:
|
||||
|
||||
```
|
||||
gcloud compute ssh controller-0
|
||||
```
|
||||
|
||||
If this is your first time connecting to a compute instance SSH keys will be generated for you. Enter a passphrase at the prompt to continue:
|
||||
|
||||
```
|
||||
WARNING: The public SSH key file for gcloud does not exist.
|
||||
WARNING: The private SSH key file for gcloud does not exist.
|
||||
WARNING: You do not have an SSH key for gcloud.
|
||||
WARNING: SSH keygen will be executed to generate a key.
|
||||
Generating public/private rsa key pair.
|
||||
Enter passphrase (empty for no passphrase):
|
||||
Enter same passphrase again:
|
||||
```
|
||||
|
||||
At this point the generated SSH keys will be uploaded and stored in your project:
|
||||
|
||||
```
|
||||
Your identification has been saved in /home/$USER/.ssh/google_compute_engine.
|
||||
Your public key has been saved in /home/$USER/.ssh/google_compute_engine.pub.
|
||||
The key fingerprint is:
|
||||
SHA256:nz1i8jHmgQuGt+WscqP5SeIaSy5wyIJeL71MuV+QruE $USER@$HOSTNAME
|
||||
The key's randomart image is:
|
||||
+---[RSA 2048]----+
|
||||
| |
|
||||
| |
|
||||
| |
|
||||
| . |
|
||||
|o. oS |
|
||||
|=... .o .o o |
|
||||
|+.+ =+=.+.X o |
|
||||
|.+ ==O*B.B = . |
|
||||
| .+.=EB++ o |
|
||||
+----[SHA256]-----+
|
||||
Updating project ssh metadata...-Updated [https://www.googleapis.com/compute/v1/projects/$PROJECT_ID].
|
||||
Updating project ssh metadata...done.
|
||||
Waiting for SSH key to propagate.
|
||||
```
|
||||
|
||||
After the SSH keys have been updated you'll be logged into the `controller-0` instance:
|
||||
|
||||
```
|
||||
Welcome to Ubuntu 18.04 LTS (GNU/Linux 4.15.0-1006-gcp x86_64)
|
||||
|
||||
...
|
||||
|
||||
Last login: Sun May 13 14:34:27 2018 from XX.XXX.XXX.XX
|
||||
```
|
||||
|
||||
Type `exit` at the prompt to exit the `controller-0` compute instance:
|
||||
|
||||
```
|
||||
$USER@controller-0:~$ exit
|
||||
```
|
||||
> output
|
||||
|
||||
```
|
||||
logout
|
||||
Connection to XX.XXX.XXX.XXX closed
|
||||
```
|
||||
|
||||
Next: [Provisioning a CA and Generating TLS Certificates](04-certificate-authority.md)
|
||||
`vagrant up`
|
||||
|
|
|
@ -1,63 +1,52 @@
|
|||
# Provisioning a CA and Generating TLS Certificates
|
||||
|
||||
In this lab you will provision a [PKI Infrastructure](https://en.wikipedia.org/wiki/Public_key_infrastructure) using CloudFlare's PKI toolkit, [cfssl](https://github.com/cloudflare/cfssl), then use it to bootstrap a Certificate Authority, and generate TLS certificates for the following components: etcd, kube-apiserver, kube-controller-manager, kube-scheduler, kubelet, and kube-proxy.
|
||||
In this lab you will provision a [PKI Infrastructure](https://en.wikipedia.org/wiki/Public_key_infrastructure) using the popular `openssl` tool, then use it to bootstrap a Certificate Authority, and generate TLS certificates for the following components: etcd, kube-apiserver, kube-controller-manager, kube-scheduler, kubelet, and kube-proxy.
|
||||
|
||||
## Where to perform these steps
|
||||
|
||||
You can perform these steps on any machine that has `openssl` installed, as long as you can copy the generated files to the provisioned VMs. Alternatively, perform them directly on one of the master nodes.
|
||||
|
||||
In my case I do it on the master-1 node, so I create an SSH key pair on master-1 and add the public key to the `authorized_keys` file on the other nodes.
|
||||
|
||||
Generate an SSH key pair on the master-1 node:
|
||||
`ssh-keygen`
|
||||
|
||||
Add the public key to `authorized_keys` on the other nodes (the key shown below is only an example; see the snippet after this block for printing your own):
|
||||
|
||||
```
cat >> ~/.ssh/authorized_keys <<EOF
|
||||
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDiOE4ljVBCoQqtW26sWuYuC5UA91JtqC9ikWl9xDmpd0E8d5/WqvEBPzoUTe3w3pBzWJ8Zho1Uyf8zPhmwE1+l0LsgrtKmFNhh2bRcdptvUCJddrhvfC39BalAg9rYPl4qzZrKRI4904/ErRKVBidRR24rSU2fhqFjpsGpdQJOWa4HzRjpfCwvMnPmL1XaU6T8Hsrv4ol+/D+o/YwXBEjE/TrIkMutG1c37batVHsOz3o16NPbsnZnH2nEOZr/dhKmkQn0qshs/6GvU5glx5rnGbnrykj3t6xGmkbdfDVUYiXwS4BBRp8FYmlBuVn9wMGdZxZSDmH2E1yIplP8+08b vagrant@master-1
|
||||
EOF
|
||||
```
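The `ssh-rsa ...` key above is only an example. Print your own public key on master-1 and paste it into the heredoc on each of the other nodes (assuming the default key path chosen by `ssh-keygen`):

```
cat ~/.ssh/id_rsa.pub
```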
|
||||
|
||||
## Certificate Authority
|
||||
|
||||
In this section you will provision a Certificate Authority that can be used to generate additional TLS certificates.
|
||||
|
||||
Generate the CA configuration file, certificate, and private key:
|
||||
Create a private key, generate a Certificate Signing Request (CSR), and self-sign it to create the CA certificate:
|
||||
|
||||
|
||||
```
|
||||
{
|
||||
# Create private key for CA
|
||||
openssl genrsa -out ca.key 2048
|
||||
|
||||
cat > ca-config.json <<EOF
|
||||
{
|
||||
"signing": {
|
||||
"default": {
|
||||
"expiry": "8760h"
|
||||
},
|
||||
"profiles": {
|
||||
"kubernetes": {
|
||||
"usages": ["signing", "key encipherment", "server auth", "client auth"],
|
||||
"expiry": "8760h"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
EOF
|
||||
# Create CSR using the private key
|
||||
openssl req -new -key ca.key -subj "/CN=KUBERNETES-CA" -out ca.csr
|
||||
|
||||
cat > ca-csr.json <<EOF
|
||||
{
|
||||
"CN": "Kubernetes",
|
||||
"key": {
|
||||
"algo": "rsa",
|
||||
"size": 2048
|
||||
},
|
||||
"names": [
|
||||
{
|
||||
"C": "US",
|
||||
"L": "Portland",
|
||||
"O": "Kubernetes",
|
||||
"OU": "CA",
|
||||
"ST": "Oregon"
|
||||
}
|
||||
]
|
||||
}
|
||||
EOF
|
||||
|
||||
cfssl gencert -initca ca-csr.json | cfssljson -bare ca
|
||||
|
||||
}
|
||||
# Self-sign the CSR using its own private key
|
||||
openssl x509 -req -in ca.csr -signkey ca.key -CAcreateserial -out ca.crt -days 1000
|
||||
```
|
||||
|
||||
Results:
|
||||
|
||||
```
|
||||
ca-key.pem
|
||||
ca.pem
|
||||
ca.crt
|
||||
ca.key
|
||||
```
|
||||
|
||||
The ca.crt is the Kubernetes Certificate Authority certificate and ca.key is the Kubernetes Certificate Authority private key.
|
||||
You will use the ca.crt file in many places, so it will be copied to many places.
|
||||
The ca.key is used by the CA for signing certificates, and it should be stored securely. In this case our master node(s) act as the CA server as well, so we will store it on the master node(s). There is no need to copy this file anywhere else.
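To sanity-check the generated CA certificate, you can inspect its subject and validity period (a quick verification, assuming the files above are in the current directory):

```
openssl x509 -in ca.crt -noout -subject -dates
```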
|
||||
|
||||
## Client and Server Certificates
|
||||
|
||||
In this section you will generate client and server certificates for each Kubernetes component and a client certificate for the Kubernetes `admin` user.
|
||||
|
@ -67,139 +56,47 @@ In this section you will generate client and server certificates for each Kubern
|
|||
Generate the `admin` client certificate and private key:
|
||||
|
||||
```
|
||||
{
|
||||
# Generate private key for admin user
|
||||
openssl genrsa -out admin.key 2048
|
||||
|
||||
cat > admin-csr.json <<EOF
|
||||
{
|
||||
"CN": "admin",
|
||||
"key": {
|
||||
"algo": "rsa",
|
||||
"size": 2048
|
||||
},
|
||||
"names": [
|
||||
{
|
||||
"C": "US",
|
||||
"L": "Portland",
|
||||
"O": "system:masters",
|
||||
"OU": "Kubernetes The Hard Way",
|
||||
"ST": "Oregon"
|
||||
}
|
||||
]
|
||||
}
|
||||
EOF
|
||||
# Generate CSR for admin user. Note the O (organization) field.
|
||||
openssl req -new -key admin.key -subj "/CN=admin/O=system:masters" -out admin.csr
|
||||
|
||||
cfssl gencert \
|
||||
-ca=ca.pem \
|
||||
-ca-key=ca-key.pem \
|
||||
-config=ca-config.json \
|
||||
-profile=kubernetes \
|
||||
admin-csr.json | cfssljson -bare admin
|
||||
|
||||
}
|
||||
# Sign certificate for admin user using the CA server's private key
|
||||
openssl x509 -req -in admin.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out admin.crt
|
||||
```
|
||||
|
||||
Note that the admin user is part of the **system:masters** group. This is how we are able to perform any administrative operation on the Kubernetes cluster using the kubectl utility.
|
||||
|
||||
Results:
|
||||
|
||||
```
|
||||
admin-key.pem
|
||||
admin.pem
|
||||
admin.key
|
||||
admin.crt
|
||||
```
|
||||
|
||||
The admin.crt and admin.key files give you administrative access. We will configure them to be used with the kubectl tool to perform administrative functions on Kubernetes.
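You can verify that the group membership made it into the certificate by inspecting its subject (the output should show `O = system:masters`; the exact formatting depends on your openssl version):

```
openssl x509 -in admin.crt -noout -subject
```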
|
||||
|
||||
### The Kubelet Client Certificates
|
||||
|
||||
Kubernetes uses a [special-purpose authorization mode](https://kubernetes.io/docs/admin/authorization/node/) called Node Authorizer, that specifically authorizes API requests made by [Kubelets](https://kubernetes.io/docs/concepts/overview/components/#kubelet). In order to be authorized by the Node Authorizer, Kubelets must use a credential that identifies them as being in the `system:nodes` group, with a username of `system:node:<nodeName>`. In this section you will create a certificate for each Kubernetes worker node that meets the Node Authorizer requirements.
|
||||
|
||||
Generate a certificate and private key for each Kubernetes worker node:
|
||||
|
||||
```
|
||||
for instance in worker-0 worker-1 worker-2; do
|
||||
cat > ${instance}-csr.json <<EOF
|
||||
{
|
||||
"CN": "system:node:${instance}",
|
||||
"key": {
|
||||
"algo": "rsa",
|
||||
"size": 2048
|
||||
},
|
||||
"names": [
|
||||
{
|
||||
"C": "US",
|
||||
"L": "Portland",
|
||||
"O": "system:nodes",
|
||||
"OU": "Kubernetes The Hard Way",
|
||||
"ST": "Oregon"
|
||||
}
|
||||
]
|
||||
}
|
||||
EOF
|
||||
|
||||
EXTERNAL_IP=$(gcloud compute instances describe ${instance} \
|
||||
--format 'value(networkInterfaces[0].accessConfigs[0].natIP)')
|
||||
|
||||
INTERNAL_IP=$(gcloud compute instances describe ${instance} \
|
||||
--format 'value(networkInterfaces[0].networkIP)')
|
||||
|
||||
cfssl gencert \
|
||||
-ca=ca.pem \
|
||||
-ca-key=ca-key.pem \
|
||||
-config=ca-config.json \
|
||||
-hostname=${instance},${EXTERNAL_IP},${INTERNAL_IP} \
|
||||
-profile=kubernetes \
|
||||
${instance}-csr.json | cfssljson -bare ${instance}
|
||||
done
|
||||
```
|
||||
|
||||
Results:
|
||||
|
||||
```
|
||||
worker-0-key.pem
|
||||
worker-0.pem
|
||||
worker-1-key.pem
|
||||
worker-1.pem
|
||||
worker-2-key.pem
|
||||
worker-2.pem
|
||||
```
|
||||
We are going to skip certificate configuration for Worker Nodes for now. We will deal with them when we configure the workers.
|
||||
For now let's just focus on the control plane components.
|
||||
|
||||
### The Controller Manager Client Certificate
|
||||
|
||||
Generate the `kube-controller-manager` client certificate and private key:
|
||||
|
||||
```
|
||||
{
|
||||
|
||||
cat > kube-controller-manager-csr.json <<EOF
|
||||
{
|
||||
"CN": "system:kube-controller-manager",
|
||||
"key": {
|
||||
"algo": "rsa",
|
||||
"size": 2048
|
||||
},
|
||||
"names": [
|
||||
{
|
||||
"C": "US",
|
||||
"L": "Portland",
|
||||
"O": "system:kube-controller-manager",
|
||||
"OU": "Kubernetes The Hard Way",
|
||||
"ST": "Oregon"
|
||||
}
|
||||
]
|
||||
}
|
||||
EOF
|
||||
|
||||
cfssl gencert \
|
||||
-ca=ca.pem \
|
||||
-ca-key=ca-key.pem \
|
||||
-config=ca-config.json \
|
||||
-profile=kubernetes \
|
||||
kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager
|
||||
|
||||
}
|
||||
openssl genrsa -out kube-controller-manager.key 2048
|
||||
openssl req -new -key kube-controller-manager.key -subj "/CN=system:kube-controller-manager" -out kube-controller-manager.csr
|
||||
openssl x509 -req -in kube-controller-manager.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out kube-controller-manager.crt
|
||||
```
|
||||
|
||||
Results:
|
||||
|
||||
```
|
||||
kube-controller-manager-key.pem
|
||||
kube-controller-manager.pem
|
||||
kube-controller-manager.key
|
||||
kube-controller-manager.crt
|
||||
```
|
||||
|
||||
|
||||
|
@ -207,137 +104,119 @@ kube-controller-manager.pem
|
|||
|
||||
Generate the `kube-proxy` client certificate and private key:
|
||||
|
||||
|
||||
```
|
||||
{
|
||||
|
||||
cat > kube-proxy-csr.json <<EOF
|
||||
{
|
||||
"CN": "system:kube-proxy",
|
||||
"key": {
|
||||
"algo": "rsa",
|
||||
"size": 2048
|
||||
},
|
||||
"names": [
|
||||
{
|
||||
"C": "US",
|
||||
"L": "Portland",
|
||||
"O": "system:node-proxier",
|
||||
"OU": "Kubernetes The Hard Way",
|
||||
"ST": "Oregon"
|
||||
}
|
||||
]
|
||||
}
|
||||
EOF
|
||||
|
||||
cfssl gencert \
|
||||
-ca=ca.pem \
|
||||
-ca-key=ca-key.pem \
|
||||
-config=ca-config.json \
|
||||
-profile=kubernetes \
|
||||
kube-proxy-csr.json | cfssljson -bare kube-proxy
|
||||
|
||||
}
|
||||
openssl genrsa -out kube-proxy.key 2048
|
||||
openssl req -new -key kube-proxy.key -subj "/CN=system:kube-proxy" -out kube-proxy.csr
|
||||
openssl x509 -req -in kube-proxy.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out kube-proxy.crt
|
||||
```
|
||||
|
||||
Results:
|
||||
|
||||
```
|
||||
kube-proxy-key.pem
|
||||
kube-proxy.pem
|
||||
kube-proxy.key
|
||||
kube-proxy.crt
|
||||
```
|
||||
|
||||
### The Scheduler Client Certificate
|
||||
|
||||
Generate the `kube-scheduler` client certificate and private key:
|
||||
|
||||
|
||||
|
||||
```
|
||||
{
|
||||
|
||||
cat > kube-scheduler-csr.json <<EOF
|
||||
{
|
||||
"CN": "system:kube-scheduler",
|
||||
"key": {
|
||||
"algo": "rsa",
|
||||
"size": 2048
|
||||
},
|
||||
"names": [
|
||||
{
|
||||
"C": "US",
|
||||
"L": "Portland",
|
||||
"O": "system:kube-scheduler",
|
||||
"OU": "Kubernetes The Hard Way",
|
||||
"ST": "Oregon"
|
||||
}
|
||||
]
|
||||
}
|
||||
EOF
|
||||
|
||||
cfssl gencert \
|
||||
-ca=ca.pem \
|
||||
-ca-key=ca-key.pem \
|
||||
-config=ca-config.json \
|
||||
-profile=kubernetes \
|
||||
kube-scheduler-csr.json | cfssljson -bare kube-scheduler
|
||||
|
||||
}
|
||||
openssl genrsa -out kube-scheduler.key 2048
|
||||
openssl req -new -key kube-scheduler.key -subj "/CN=system:kube-scheduler" -out kube-scheduler.csr
|
||||
openssl x509 -req -in kube-scheduler.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out kube-scheduler.crt
|
||||
```
|
||||
|
||||
Results:
|
||||
|
||||
```
|
||||
kube-scheduler-key.pem
|
||||
kube-scheduler.pem
|
||||
kube-scheduler.key
|
||||
kube-scheduler.crt
|
||||
```
|
||||
|
||||
|
||||
### The Kubernetes API Server Certificate
|
||||
|
||||
The `kubernetes-the-hard-way` static IP address will be included in the list of subject alternative names for the Kubernetes API Server certificate. This will ensure the certificate can be validated by remote clients.
|
||||
The kube-apiserver certificate must include, among its subject alternative names, every name by which the various components may reach it. These include the different DNS names and IP addresses, such as the master servers' IP addresses, the load balancer's IP address, the kube-apiserver service IP address, etc.
|
||||
|
||||
Generate the Kubernetes API Server certificate and private key:
|
||||
The `openssl` command cannot take subject alternative names as a command-line parameter, so we must create a `conf` file for them:
|
||||
|
||||
```
|
||||
{
|
||||
|
||||
KUBERNETES_PUBLIC_ADDRESS=$(gcloud compute addresses describe kubernetes-the-hard-way \
|
||||
--region $(gcloud config get-value compute/region) \
|
||||
--format 'value(address)')
|
||||
|
||||
cat > kubernetes-csr.json <<EOF
|
||||
{
|
||||
"CN": "kubernetes",
|
||||
"key": {
|
||||
"algo": "rsa",
|
||||
"size": 2048
|
||||
},
|
||||
"names": [
|
||||
{
|
||||
"C": "US",
|
||||
"L": "Portland",
|
||||
"O": "Kubernetes",
|
||||
"OU": "Kubernetes The Hard Way",
|
||||
"ST": "Oregon"
|
||||
}
|
||||
]
|
||||
}
|
||||
cat > openssl.cnf <<EOF
|
||||
[req]
|
||||
req_extensions = v3_req
|
||||
distinguished_name = req_distinguished_name
|
||||
[req_distinguished_name]
|
||||
[ v3_req ]
|
||||
basicConstraints = CA:FALSE
|
||||
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
|
||||
subjectAltName = @alt_names
|
||||
[alt_names]
|
||||
DNS.1 = kubernetes
|
||||
DNS.2 = kubernetes.default
|
||||
DNS.3 = kubernetes.default.svc
|
||||
DNS.4 = kubernetes.default.svc.cluster.local
|
||||
IP.1 = 10.96.0.1
|
||||
IP.2 = 192.168.5.11
|
||||
IP.3 = 192.168.5.12
|
||||
IP.4 = 192.168.5.30
|
||||
IP.5 = 127.0.0.1
|
||||
EOF
|
||||
```
|
||||
|
||||
cfssl gencert \
|
||||
-ca=ca.pem \
|
||||
-ca-key=ca-key.pem \
|
||||
-config=ca-config.json \
|
||||
-hostname=10.32.0.1,10.240.0.10,10.240.0.11,10.240.0.12,${KUBERNETES_PUBLIC_ADDRESS},127.0.0.1,kubernetes.default \
|
||||
-profile=kubernetes \
|
||||
kubernetes-csr.json | cfssljson -bare kubernetes
|
||||
Generate the certificates for kube-apiserver:
|
||||
|
||||
}
|
||||
```
|
||||
openssl genrsa -out kube-apiserver.key 2048
|
||||
openssl req -new -key kube-apiserver.key -subj "/CN=kube-apiserver" -out kube-apiserver.csr -config openssl.cnf
|
||||
openssl x509 -req -in kube-apiserver.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out kube-apiserver.crt -extensions v3_req -extfile openssl.cnf
|
||||
```
|
||||
|
||||
Results:
|
||||
|
||||
```
|
||||
kubernetes-key.pem
|
||||
kubernetes.pem
|
||||
kube-apiserver.crt
|
||||
kube-apiserver.key
|
||||
```
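To confirm the subject alternative names from `openssl.cnf` were actually embedded in the signed certificate, you can inspect it (a quick check):

```
openssl x509 -in kube-apiserver.crt -noout -text | grep -A1 "Subject Alternative Name"
```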
|
||||
|
||||
### The ETCD Server Certificate
|
||||
|
||||
Similarly, the etcd server certificate must include the addresses of all servers that are part of the etcd cluster.
|
||||
|
||||
The `openssl` command cannot take subject alternative names as a command-line parameter, so we must create a `conf` file for them:
|
||||
|
||||
```
|
||||
cat > openssl-etcd.cnf <<EOF
|
||||
[req]
|
||||
req_extensions = v3_req
|
||||
distinguished_name = req_distinguished_name
|
||||
[req_distinguished_name]
|
||||
[ v3_req ]
|
||||
basicConstraints = CA:FALSE
|
||||
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
|
||||
subjectAltName = @alt_names
|
||||
[alt_names]
|
||||
IP.1 = 192.168.5.11
|
||||
IP.2 = 192.168.5.12
|
||||
IP.3 = 127.0.0.1
|
||||
EOF
|
||||
```
|
||||
|
||||
Generate the certificates for etcd:
|
||||
|
||||
```
|
||||
openssl genrsa -out etcd-server.key 2048
|
||||
openssl req -new -key etcd-server.key -subj "/CN=etcd-server" -out etcd-server.csr -config openssl-etcd.cnf
|
||||
openssl x509 -req -in etcd-server.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out etcd-server.crt -extensions v3_req -extfile openssl-etcd.cnf
|
||||
```
|
||||
|
||||
Results:
|
||||
|
||||
```
|
||||
etcd-server.crt
|
||||
etcd-server.key
|
||||
```
|
||||
|
||||
## The Service Account Key Pair
|
||||
|
@ -347,61 +226,29 @@ The Kubernetes Controller Manager leverages a key pair to generate and sign serv
|
|||
Generate the `service-account` certificate and private key:
|
||||
|
||||
```
|
||||
{
|
||||
|
||||
cat > service-account-csr.json <<EOF
|
||||
{
|
||||
"CN": "service-accounts",
|
||||
"key": {
|
||||
"algo": "rsa",
|
||||
"size": 2048
|
||||
},
|
||||
"names": [
|
||||
{
|
||||
"C": "US",
|
||||
"L": "Portland",
|
||||
"O": "Kubernetes",
|
||||
"OU": "Kubernetes The Hard Way",
|
||||
"ST": "Oregon"
|
||||
}
|
||||
]
|
||||
}
|
||||
EOF
|
||||
|
||||
cfssl gencert \
|
||||
-ca=ca.pem \
|
||||
-ca-key=ca-key.pem \
|
||||
-config=ca-config.json \
|
||||
-profile=kubernetes \
|
||||
service-account-csr.json | cfssljson -bare service-account
|
||||
|
||||
}
|
||||
openssl genrsa -out service-account.key 2048
|
||||
openssl req -new -key service-account.key -subj "/CN=service-accounts" -out service-account.csr
|
||||
openssl x509 -req -in service-account.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out service-account.crt
|
||||
```
|
||||
|
||||
Results:
|
||||
|
||||
```
|
||||
service-account-key.pem
|
||||
service-account.pem
|
||||
service-account.key
|
||||
service-account.crt
|
||||
```
|
||||
|
||||
|
||||
## Distribute the Client and Server Certificates
|
||||
|
||||
Copy the appropriate certificates and private keys to each worker instance:
|
||||
|
||||
```
|
||||
for instance in worker-0 worker-1 worker-2; do
|
||||
gcloud compute scp ca.pem ${instance}-key.pem ${instance}.pem ${instance}:~/
|
||||
done
|
||||
```
|
||||
## Distribute the Certificates
|
||||
|
||||
Copy the appropriate certificates and private keys to each controller instance:
|
||||
|
||||
```
|
||||
for instance in controller-0 controller-1 controller-2; do
|
||||
gcloud compute scp ca.pem ca-key.pem kubernetes-key.pem kubernetes.pem \
|
||||
service-account-key.pem service-account.pem ${instance}:~/
|
||||
for instance in master-1 master-2; do
|
||||
scp ca.crt ca.key kube-apiserver.key kube-apiserver.crt \
|
||||
service-account.key service-account.crt \
|
||||
etcd-server.key etcd-server.crt \
|
||||
${instance}:~/
|
||||
done
|
||||
```
|
||||
|
||||
|
|
|
@ -8,33 +8,29 @@ In this section you will generate kubeconfig files for the `controller manager`,
|
|||
|
||||
### Kubernetes Public IP Address
|
||||
|
||||
Each kubeconfig requires a Kubernetes API Server to connect to. To support high availability the IP address assigned to the external load balancer fronting the Kubernetes API Servers will be used.
|
||||
|
||||
Retrieve the `kubernetes-the-hard-way` static IP address:
|
||||
Each kubeconfig requires a Kubernetes API Server to connect to. To support high availability, the IP address assigned to the load balancer will be used. In our case it is `192.168.5.30`.
|
||||
|
||||
```
|
||||
KUBERNETES_PUBLIC_ADDRESS=$(gcloud compute addresses describe kubernetes-the-hard-way \
|
||||
--region $(gcloud config get-value compute/region) \
|
||||
--format 'value(address)')
|
||||
LOADBALANCER_ADDRESS=192.168.5.30
|
||||
```
|
||||
|
||||
### The kubelet Kubernetes Configuration File
|
||||
|
||||
When generating kubeconfig files for Kubelets the client certificate matching the Kubelet's node name must be used. This will ensure Kubelets are properly authorized by the Kubernetes [Node Authorizer](https://kubernetes.io/docs/admin/authorization/node/).
|
||||
|
||||
Generate a kubeconfig file for each worker node:
|
||||
Generate a kubeconfig file for the first worker node:
|
||||
|
||||
```
|
||||
for instance in worker-0 worker-1 worker-2; do
|
||||
for instance in worker-1; do
|
||||
kubectl config set-cluster kubernetes-the-hard-way \
|
||||
--certificate-authority=ca.pem \
|
||||
--certificate-authority=ca.crt \
|
||||
--embed-certs=true \
|
||||
--server=https://${KUBERNETES_PUBLIC_ADDRESS}:6443 \
|
||||
--server=https://${LOADBALANCER_ADDRESS}:6443 \
|
||||
--kubeconfig=${instance}.kubeconfig
|
||||
|
||||
kubectl config set-credentials system:node:${instance} \
|
||||
--client-certificate=${instance}.pem \
|
||||
--client-key=${instance}-key.pem \
|
||||
--client-certificate=${instance}.crt \
|
||||
--client-key=${instance}.key \
|
||||
--embed-certs=true \
|
||||
--kubeconfig=${instance}.kubeconfig
|
||||
|
||||
|
@ -50,9 +46,7 @@ done
|
|||
Results:
|
||||
|
||||
```
|
||||
worker-0.kubeconfig
|
||||
worker-1.kubeconfig
|
||||
worker-2.kubeconfig
|
||||
```
|
||||
|
||||
### The kube-proxy Kubernetes Configuration File
|
||||
|
@ -62,14 +56,14 @@ Generate a kubeconfig file for the `kube-proxy` service:
|
|||
```
|
||||
{
|
||||
kubectl config set-cluster kubernetes-the-hard-way \
|
||||
--certificate-authority=ca.pem \
|
||||
--certificate-authority=ca.crt \
|
||||
--embed-certs=true \
|
||||
--server=https://${KUBERNETES_PUBLIC_ADDRESS}:6443 \
|
||||
--server=https://${LOADBALANCER_ADDRESS}:6443 \
|
||||
--kubeconfig=kube-proxy.kubeconfig
|
||||
|
||||
kubectl config set-credentials system:kube-proxy \
|
||||
--client-certificate=kube-proxy.pem \
|
||||
--client-key=kube-proxy-key.pem \
|
||||
--client-certificate=kube-proxy.crt \
|
||||
--client-key=kube-proxy.key \
|
||||
--embed-certs=true \
|
||||
--kubeconfig=kube-proxy.kubeconfig
|
||||
|
||||
|
@ -95,14 +89,14 @@ Generate a kubeconfig file for the `kube-controller-manager` service:
|
|||
```
|
||||
{
|
||||
kubectl config set-cluster kubernetes-the-hard-way \
|
||||
--certificate-authority=ca.pem \
|
||||
--certificate-authority=ca.crt \
|
||||
--embed-certs=true \
|
||||
--server=https://127.0.0.1:6443 \
|
||||
--kubeconfig=kube-controller-manager.kubeconfig
|
||||
|
||||
kubectl config set-credentials system:kube-controller-manager \
|
||||
--client-certificate=kube-controller-manager.pem \
|
||||
--client-key=kube-controller-manager-key.pem \
|
||||
--client-certificate=kube-controller-manager.crt \
|
||||
--client-key=kube-controller-manager.key \
|
||||
--embed-certs=true \
|
||||
--kubeconfig=kube-controller-manager.kubeconfig
|
||||
|
||||
|
@ -129,14 +123,14 @@ Generate a kubeconfig file for the `kube-scheduler` service:
|
|||
```
|
||||
{
|
||||
kubectl config set-cluster kubernetes-the-hard-way \
|
||||
--certificate-authority=ca.pem \
|
||||
--certificate-authority=ca.crt \
|
||||
--embed-certs=true \
|
||||
--server=https://127.0.0.1:6443 \
|
||||
--kubeconfig=kube-scheduler.kubeconfig
|
||||
|
||||
kubectl config set-credentials system:kube-scheduler \
|
||||
--client-certificate=kube-scheduler.pem \
|
||||
--client-key=kube-scheduler-key.pem \
|
||||
--client-certificate=kube-scheduler.crt \
|
||||
--client-key=kube-scheduler.key \
|
||||
--embed-certs=true \
|
||||
--kubeconfig=kube-scheduler.kubeconfig
|
||||
|
||||
|
@ -162,14 +156,14 @@ Generate a kubeconfig file for the `admin` user:
|
|||
```
|
||||
{
|
||||
kubectl config set-cluster kubernetes-the-hard-way \
|
||||
--certificate-authority=ca.pem \
|
||||
--certificate-authority=ca.crt \
|
||||
--embed-certs=true \
|
||||
--server=https://127.0.0.1:6443 \
|
||||
--kubeconfig=admin.kubeconfig
|
||||
|
||||
kubectl config set-credentials admin \
|
||||
--client-certificate=admin.pem \
|
||||
--client-key=admin-key.pem \
|
||||
--client-certificate=admin.crt \
|
||||
--client-key=admin.key \
|
||||
--embed-certs=true \
|
||||
--kubeconfig=admin.kubeconfig
|
||||
|
||||
|
@ -189,23 +183,29 @@ admin.kubeconfig
|
|||
```
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
## Distribute the Kubernetes Configuration Files
|
||||
|
||||
Copy the appropriate `kubelet` and `kube-proxy` kubeconfig files to each worker instance:
|
||||
|
||||
```
|
||||
for instance in worker-0 worker-1 worker-2; do
|
||||
gcloud compute scp ${instance}.kubeconfig kube-proxy.kubeconfig ${instance}:~/
|
||||
for instance in worker-1; do
|
||||
scp ${instance}.kubeconfig ${instance}:~/
|
||||
done
|
||||
```
|
||||
|
||||
```
|
||||
for instance in worker-1 worker-2; do
|
||||
scp kube-proxy.kubeconfig ${instance}:~/
|
||||
done
|
||||
```
|
||||
|
||||
Copy the appropriate `kube-controller-manager` and `kube-scheduler` kubeconfig files to each controller instance:
|
||||
|
||||
```
|
||||
for instance in controller-0 controller-1 controller-2; do
|
||||
gcloud compute scp admin.kubeconfig kube-controller-manager.kubeconfig kube-scheduler.kubeconfig ${instance}:~/
|
||||
for instance in master-1 master-2; do
|
||||
scp admin.kubeconfig kube-controller-manager.kubeconfig kube-scheduler.kubeconfig ${instance}:~/
|
||||
done
|
||||
```
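To confirm the files landed, you can list them on the other master from master-1 (a quick check, assuming the SSH key pair set up in the previous lab):

```
ssh master-2 'ls -l ~/*.kubeconfig'
```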
|
||||
|
||||
|
|
|
@ -35,8 +35,8 @@ EOF
|
|||
Copy the `encryption-config.yaml` encryption config file to each controller instance:
|
||||
|
||||
```
|
||||
for instance in controller-0 controller-1 controller-2; do
|
||||
gcloud compute scp encryption-config.yaml ${instance}:~/
|
||||
for instance in master-1 master-2; do
|
||||
scp encryption-config.yaml ${instance}:~/
|
||||
done
|
||||
```
|
||||
|
||||
|
|
|
@ -4,11 +4,7 @@ Kubernetes components are stateless and store cluster state in [etcd](https://gi
|
|||
|
||||
## Prerequisites
|
||||
|
||||
The commands in this lab must be run on each controller instance: `controller-0`, `controller-1`, and `controller-2`. Login to each controller instance using the `gcloud` command. Example:
|
||||
|
||||
```
|
||||
gcloud compute ssh controller-0
|
||||
```
|
||||
The commands in this lab must be run on each controller instance: `master-1` and `master-2`. Log in to each of these using an SSH terminal.
|
||||
|
||||
### Running commands in parallel with tmux
|
||||
|
||||
|
@ -39,15 +35,14 @@ Extract and install the `etcd` server and the `etcdctl` command line utility:
|
|||
```
|
||||
{
|
||||
sudo mkdir -p /etc/etcd /var/lib/etcd
|
||||
sudo cp ca.pem kubernetes-key.pem kubernetes.pem /etc/etcd/
|
||||
sudo cp ca.crt etcd-server.key etcd-server.crt /etc/etcd/
|
||||
}
|
||||
```
|
||||
|
||||
The instance internal IP address will be used to serve client requests and communicate with etcd cluster peers. Retrieve the internal IP address for the current compute instance:
|
||||
The instance internal IP address will be used to serve client requests and communicate with etcd cluster peers. Retrieve the internal IP address of the master (etcd) nodes:
|
||||
|
||||
```
|
||||
INTERNAL_IP=$(curl -s -H "Metadata-Flavor: Google" \
|
||||
http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/0/ip)
|
||||
INTERNAL_IP=$(ip addr show enp0s8 | grep "inet " | awk '{print $2}' | cut -d / -f 1)
|
||||
```
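A quick check that the address was picked up correctly (it should print `192.168.5.11` on master-1 and `192.168.5.12` on master-2):

```
echo ${INTERNAL_IP}
```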
|
||||
|
||||
Each etcd member must have a unique name within an etcd cluster. Set the etcd name to match the hostname of the current compute instance:
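The command itself is unchanged by this diff; a minimal sketch of what it looks like, assuming the VM hostname matches the intended etcd member name (`master-1` or `master-2`):

```
ETCD_NAME=$(hostname -s)
echo ${ETCD_NAME}
```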
|
||||
|
@ -67,12 +62,12 @@ Documentation=https://github.com/coreos
|
|||
[Service]
|
||||
ExecStart=/usr/local/bin/etcd \\
|
||||
--name ${ETCD_NAME} \\
|
||||
--cert-file=/etc/etcd/kubernetes.pem \\
|
||||
--key-file=/etc/etcd/kubernetes-key.pem \\
|
||||
--peer-cert-file=/etc/etcd/kubernetes.pem \\
|
||||
--peer-key-file=/etc/etcd/kubernetes-key.pem \\
|
||||
--trusted-ca-file=/etc/etcd/ca.pem \\
|
||||
--peer-trusted-ca-file=/etc/etcd/ca.pem \\
|
||||
--cert-file=/etc/etcd/etcd-server.crt \\
|
||||
--key-file=/etc/etcd/etcd-server.key \\
|
||||
--peer-cert-file=/etc/etcd/etcd-server.crt \\
|
||||
--peer-key-file=/etc/etcd/etcd-server.key \\
|
||||
--trusted-ca-file=/etc/etcd/ca.crt \\
|
||||
--peer-trusted-ca-file=/etc/etcd/ca.crt \\
|
||||
--peer-client-cert-auth \\
|
||||
--client-cert-auth \\
|
||||
--initial-advertise-peer-urls https://${INTERNAL_IP}:2380 \\
|
||||
|
@ -80,7 +75,7 @@ ExecStart=/usr/local/bin/etcd \\
|
|||
--listen-client-urls https://${INTERNAL_IP}:2379,https://127.0.0.1:2379 \\
|
||||
--advertise-client-urls https://${INTERNAL_IP}:2379 \\
|
||||
--initial-cluster-token etcd-cluster-0 \\
|
||||
--initial-cluster controller-0=https://10.240.0.10:2380,controller-1=https://10.240.0.11:2380,controller-2=https://10.240.0.12:2380 \\
|
||||
--initial-cluster master-1=https://192.168.5.11:2380,master-2=https://192.168.5.12:2380 \\
|
||||
--initial-cluster-state new \\
|
||||
--data-dir=/var/lib/etcd
|
||||
Restart=on-failure
|
||||
|
@ -101,7 +96,7 @@ EOF
|
|||
}
|
||||
```
|
||||
|
||||
> Remember to run the above commands on each controller node: `controller-0`, `controller-1`, and `controller-2`.
|
||||
> Remember to run the above commands on each controller node: `master-1` and `master-2`.
|
||||
|
||||
## Verification
|
||||
|
||||
|
@ -110,17 +105,16 @@ List the etcd cluster members:
|
|||
```
|
||||
sudo ETCDCTL_API=3 etcdctl member list \
|
||||
--endpoints=https://127.0.0.1:2379 \
|
||||
--cacert=/etc/etcd/ca.pem \
|
||||
--cert=/etc/etcd/kubernetes.pem \
|
||||
--key=/etc/etcd/kubernetes-key.pem
|
||||
--cacert=/etc/etcd/ca.crt \
|
||||
--cert=/etc/etcd/etcd-server.crt \
|
||||
--key=/etc/etcd/etcd-server.key
|
||||
```
|
||||
|
||||
> output
|
||||
|
||||
```
|
||||
3a57933972cb5131, started, controller-2, https://10.240.0.12:2380, https://10.240.0.12:2379
|
||||
f98dc20bce6225a0, started, controller-0, https://10.240.0.10:2380, https://10.240.0.10:2379
|
||||
ffed16798470cab5, started, controller-1, https://10.240.0.11:2380, https://10.240.0.11:2379
|
||||
45bf9ccad8d8900a, started, master-2, https://192.168.5.12:2380, https://192.168.5.12:2379
|
||||
54a5796a6803f252, started, master-1, https://192.168.5.11:2380, https://192.168.5.11:2379
|
||||
```
|
||||
|
||||
Next: [Bootstrapping the Kubernetes Control Plane](08-bootstrapping-kubernetes-controllers.md)
|
||||
|
|
|
@ -1,14 +1,10 @@
|
|||
# Bootstrapping the Kubernetes Control Plane
|
||||
|
||||
In this lab you will bootstrap the Kubernetes control plane across three compute instances and configure it for high availability. You will also create an external load balancer that exposes the Kubernetes API Servers to remote clients. The following components will be installed on each node: Kubernetes API Server, Scheduler, and Controller Manager.
|
||||
In this lab you will bootstrap the Kubernetes control plane across 2 compute instances and configure it for high availability. You will also create an external load balancer that exposes the Kubernetes API Servers to remote clients. The following components will be installed on each node: Kubernetes API Server, Scheduler, and Controller Manager.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
The commands in this lab must be run on each controller instance: `controller-0`, `controller-1`, and `controller-2`. Login to each controller instance using the `gcloud` command. Example:
|
||||
|
||||
```
|
||||
gcloud compute ssh controller-0
|
||||
```
|
||||
The commands in this lab must be run on each controller instance: `master-1` and `master-2`. Log in to each controller instance using an SSH terminal.
|
||||
|
||||
### Running commands in parallel with tmux
|
||||
|
||||
|
@ -28,10 +24,10 @@ Download the official Kubernetes release binaries:
|
|||
|
||||
```
|
||||
wget -q --show-progress --https-only --timestamping \
|
||||
"https://storage.googleapis.com/kubernetes-release/release/v1.12.0/bin/linux/amd64/kube-apiserver" \
|
||||
"https://storage.googleapis.com/kubernetes-release/release/v1.12.0/bin/linux/amd64/kube-controller-manager" \
|
||||
"https://storage.googleapis.com/kubernetes-release/release/v1.12.0/bin/linux/amd64/kube-scheduler" \
|
||||
"https://storage.googleapis.com/kubernetes-release/release/v1.12.0/bin/linux/amd64/kubectl"
|
||||
"https://storage.googleapis.com/kubernetes-release/release/v1.13.0/bin/linux/amd64/kube-apiserver" \
|
||||
"https://storage.googleapis.com/kubernetes-release/release/v1.13.0/bin/linux/amd64/kube-controller-manager" \
|
||||
"https://storage.googleapis.com/kubernetes-release/release/v1.13.0/bin/linux/amd64/kube-scheduler" \
|
||||
"https://storage.googleapis.com/kubernetes-release/release/v1.13.0/bin/linux/amd64/kubectl"
|
||||
```
|
||||
|
||||
Install the Kubernetes binaries:
|
||||
|
@ -49,8 +45,9 @@ Install the Kubernetes binaries:
|
|||
{
|
||||
sudo mkdir -p /var/lib/kubernetes/
|
||||
|
||||
sudo mv ca.pem ca-key.pem kubernetes-key.pem kubernetes.pem \
|
||||
service-account-key.pem service-account.pem \
|
||||
sudo mv ca.crt ca.key kube-apiserver.crt kube-apiserver.key \
|
||||
service-account.key service-account.crt \
|
||||
etcd-server.key etcd-server.crt \
|
||||
encryption-config.yaml /var/lib/kubernetes/
|
||||
}
|
||||
```
|
||||
|
@ -58,8 +55,13 @@ Install the Kubernetes binaries:
|
|||
The instance internal IP address will be used to advertise the API Server to members of the cluster. Retrieve the internal IP address for the current compute instance:
|
||||
|
||||
```
|
||||
INTERNAL_IP=$(curl -s -H "Metadata-Flavor: Google" \
|
||||
http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/0/ip)
|
||||
INTERNAL_IP=$(ip addr show enp0s8 | grep "inet " | awk '{print $2}' | cut -d / -f 1)
|
||||
```
|
||||
|
||||
Verify that it is set:
|
||||
|
||||
```
|
||||
echo $INTERNAL_IP
|
||||
```
|
||||
|
||||
Create the `kube-apiserver.service` systemd unit file:
|
||||
|
@ -81,25 +83,26 @@ ExecStart=/usr/local/bin/kube-apiserver \\
|
|||
--audit-log-path=/var/log/audit.log \\
|
||||
--authorization-mode=Node,RBAC \\
|
||||
--bind-address=0.0.0.0 \\
|
||||
--client-ca-file=/var/lib/kubernetes/ca.pem \\
|
||||
--enable-admission-plugins=Initializers,NamespaceLifecycle,NodeRestriction,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota \\
|
||||
--client-ca-file=/var/lib/kubernetes/ca.crt \\
|
||||
--enable-admission-plugins=NodeRestriction,ServiceAccount \\
|
||||
--enable-swagger-ui=true \\
|
||||
--etcd-cafile=/var/lib/kubernetes/ca.pem \\
|
||||
--etcd-certfile=/var/lib/kubernetes/kubernetes.pem \\
|
||||
--etcd-keyfile=/var/lib/kubernetes/kubernetes-key.pem \\
|
||||
--etcd-servers=https://10.240.0.10:2379,https://10.240.0.11:2379,https://10.240.0.12:2379 \\
|
||||
--enable-bootstrap-token-auth=true \\
|
||||
--etcd-cafile=/var/lib/kubernetes/ca.crt \\
|
||||
--etcd-certfile=/var/lib/kubernetes/etcd-server.crt \\
|
||||
--etcd-keyfile=/var/lib/kubernetes/etcd-server.key \\
|
||||
--etcd-servers=https://192.168.5.11:2379,https://192.168.5.12:2379 \\
|
||||
--event-ttl=1h \\
|
||||
--experimental-encryption-provider-config=/var/lib/kubernetes/encryption-config.yaml \\
|
||||
--kubelet-certificate-authority=/var/lib/kubernetes/ca.pem \\
|
||||
--kubelet-client-certificate=/var/lib/kubernetes/kubernetes.pem \\
|
||||
--kubelet-client-key=/var/lib/kubernetes/kubernetes-key.pem \\
|
||||
--encryption-provider-config=/var/lib/kubernetes/encryption-config.yaml \\
|
||||
--kubelet-certificate-authority=/var/lib/kubernetes/ca.crt \\
|
||||
--kubelet-client-certificate=/var/lib/kubernetes/kube-apiserver.crt \\
|
||||
--kubelet-client-key=/var/lib/kubernetes/kube-apiserver.key \\
|
||||
--kubelet-https=true \\
|
||||
--runtime-config=api/all \\
|
||||
--service-account-key-file=/var/lib/kubernetes/service-account.pem \\
|
||||
--service-cluster-ip-range=10.32.0.0/24 \\
|
||||
--service-account-key-file=/var/lib/kubernetes/service-account.crt \\
|
||||
--service-cluster-ip-range=10.96.0.0/24 \\
|
||||
--service-node-port-range=30000-32767 \\
|
||||
--tls-cert-file=/var/lib/kubernetes/kubernetes.pem \\
|
||||
--tls-private-key-file=/var/lib/kubernetes/kubernetes-key.pem \\
|
||||
--tls-cert-file=/var/lib/kubernetes/kube-apiserver.crt \\
|
||||
--tls-private-key-file=/var/lib/kubernetes/kube-apiserver.key \\
|
||||
--v=2
|
||||
Restart=on-failure
|
||||
RestartSec=5
|
||||
|
@ -128,15 +131,15 @@ Documentation=https://github.com/kubernetes/kubernetes
|
|||
[Service]
|
||||
ExecStart=/usr/local/bin/kube-controller-manager \\
|
||||
--address=0.0.0.0 \\
|
||||
--cluster-cidr=10.200.0.0/16 \\
|
||||
--cluster-cidr=192.168.5.0/24 \\
|
||||
--cluster-name=kubernetes \\
|
||||
--cluster-signing-cert-file=/var/lib/kubernetes/ca.pem \\
|
||||
--cluster-signing-key-file=/var/lib/kubernetes/ca-key.pem \\
|
||||
--cluster-signing-cert-file=/var/lib/kubernetes/ca.crt \\
|
||||
--cluster-signing-key-file=/var/lib/kubernetes/ca.key \\
|
||||
--kubeconfig=/var/lib/kubernetes/kube-controller-manager.kubeconfig \\
|
||||
--leader-elect=true \\
|
||||
--root-ca-file=/var/lib/kubernetes/ca.pem \\
|
||||
--service-account-private-key-file=/var/lib/kubernetes/service-account-key.pem \\
|
||||
--service-cluster-ip-range=10.32.0.0/24 \\
|
||||
--root-ca-file=/var/lib/kubernetes/ca.crt \\
|
||||
--service-account-private-key-file=/var/lib/kubernetes/service-account.key \\
|
||||
--service-cluster-ip-range=10.96.0.0/24 \\
|
||||
--use-service-account-credentials=true \\
|
||||
--v=2
|
||||
Restart=on-failure
|
||||
|
@ -155,19 +158,6 @@ Move the `kube-scheduler` kubeconfig into place:
|
|||
sudo mv kube-scheduler.kubeconfig /var/lib/kubernetes/
|
||||
```
|
||||
|
||||
Create the `kube-scheduler.yaml` configuration file:
|
||||
|
||||
```
|
||||
cat <<EOF | sudo tee /etc/kubernetes/config/kube-scheduler.yaml
|
||||
apiVersion: componentconfig/v1alpha1
|
||||
kind: KubeSchedulerConfiguration
|
||||
clientConnection:
|
||||
kubeconfig: "/var/lib/kubernetes/kube-scheduler.kubeconfig"
|
||||
leaderElection:
|
||||
leaderElect: true
|
||||
EOF
|
||||
```
|
||||
|
||||
Create the `kube-scheduler.service` systemd unit file:
|
||||
|
||||
```
|
||||
|
@ -178,7 +168,9 @@ Documentation=https://github.com/kubernetes/kubernetes
|
|||
|
||||
[Service]
|
||||
ExecStart=/usr/local/bin/kube-scheduler \\
|
||||
--config=/etc/kubernetes/config/kube-scheduler.yaml \\
|
||||
--kubeconfig=/var/lib/kubernetes/kube-scheduler.kubeconfig \\
|
||||
--address=127.0.0.1 \\
|
||||
--leader-elect=true \\
|
||||
--v=2
|
||||
Restart=on-failure
|
||||
RestartSec=5
|
||||
|
@ -200,48 +192,6 @@ EOF
|
|||
|
||||
> Allow up to 10 seconds for the Kubernetes API Server to fully initialize.
|
||||
|
||||
### Enable HTTP Health Checks
|
||||
|
||||
A [Google Network Load Balancer](https://cloud.google.com/compute/docs/load-balancing/network) will be used to distribute traffic across the three API servers and allow each API server to terminate TLS connections and validate client certificates. The network load balancer only supports HTTP health checks which means the HTTPS endpoint exposed by the API server cannot be used. As a workaround the nginx webserver can be used to proxy HTTP health checks. In this section nginx will be installed and configured to accept HTTP health checks on port `80` and proxy the connections to the API server on `https://127.0.0.1:6443/healthz`.
|
||||
|
||||
> The `/healthz` API server endpoint does not require authentication by default.
|
||||
|
||||
Install a basic web server to handle HTTP health checks:
|
||||
|
||||
```
|
||||
sudo apt-get install -y nginx
|
||||
```
|
||||
|
||||
```
|
||||
cat > kubernetes.default.svc.cluster.local <<EOF
|
||||
server {
|
||||
listen 80;
|
||||
server_name kubernetes.default.svc.cluster.local;
|
||||
|
||||
location /healthz {
|
||||
proxy_pass https://127.0.0.1:6443/healthz;
|
||||
proxy_ssl_trusted_certificate /var/lib/kubernetes/ca.pem;
|
||||
}
|
||||
}
|
||||
EOF
|
||||
```
|
||||
|
||||
```
|
||||
{
|
||||
sudo mv kubernetes.default.svc.cluster.local \
|
||||
/etc/nginx/sites-available/kubernetes.default.svc.cluster.local
|
||||
|
||||
sudo ln -s /etc/nginx/sites-available/kubernetes.default.svc.cluster.local /etc/nginx/sites-enabled/
|
||||
}
|
||||
```
|
||||
|
||||
```
|
||||
sudo systemctl restart nginx
|
||||
```
|
||||
|
||||
```
|
||||
sudo systemctl enable nginx
|
||||
```
|
||||
|
||||
### Verification
|
||||
|
||||
|
@ -253,143 +203,52 @@ kubectl get componentstatuses --kubeconfig admin.kubeconfig
|
|||
NAME STATUS MESSAGE ERROR
|
||||
controller-manager Healthy ok
|
||||
scheduler Healthy ok
|
||||
etcd-2 Healthy {"health": "true"}
|
||||
etcd-0 Healthy {"health": "true"}
|
||||
etcd-1 Healthy {"health": "true"}
|
||||
```
|
||||
|
||||
Test the nginx HTTP health check proxy:
|
||||
|
||||
```
|
||||
curl -H "Host: kubernetes.default.svc.cluster.local" -i http://127.0.0.1/healthz
|
||||
```
|
||||
|
||||
```
|
||||
HTTP/1.1 200 OK
|
||||
Server: nginx/1.14.0 (Ubuntu)
|
||||
Date: Sun, 30 Sep 2018 17:44:24 GMT
|
||||
Content-Type: text/plain; charset=utf-8
|
||||
Content-Length: 2
|
||||
Connection: keep-alive
|
||||
|
||||
ok
|
||||
```
|
||||
|
||||
> Remember to run the above commands on each controller node: `controller-0`, `controller-1`, and `controller-2`.
|
||||
|
||||
## RBAC for Kubelet Authorization
|
||||
|
||||
In this section you will configure RBAC permissions to allow the Kubernetes API Server to access the Kubelet API on each worker node. Access to the Kubelet API is required for retrieving metrics, logs, and executing commands in pods.
|
||||
|
||||
> This tutorial sets the Kubelet `--authorization-mode` flag to `Webhook`. Webhook mode uses the [SubjectAccessReview](https://kubernetes.io/docs/admin/authorization/#checking-api-access) API to determine authorization.
|
||||
|
||||
```
|
||||
gcloud compute ssh controller-0
|
||||
```
|
||||
|
||||
Create the `system:kube-apiserver-to-kubelet` [ClusterRole](https://kubernetes.io/docs/admin/authorization/rbac/#role-and-clusterrole) with permissions to access the Kubelet API and perform most common tasks associated with managing pods:
|
||||
|
||||
```
|
||||
cat <<EOF | kubectl apply --kubeconfig admin.kubeconfig -f -
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
annotations:
|
||||
rbac.authorization.kubernetes.io/autoupdate: "true"
|
||||
labels:
|
||||
kubernetes.io/bootstrapping: rbac-defaults
|
||||
name: system:kube-apiserver-to-kubelet
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- nodes/proxy
|
||||
- nodes/stats
|
||||
- nodes/log
|
||||
- nodes/spec
|
||||
- nodes/metrics
|
||||
verbs:
|
||||
- "*"
|
||||
EOF
|
||||
```
|
||||
|
||||
The Kubernetes API Server authenticates to the Kubelet as the `kubernetes` user using the client certificate as defined by the `--kubelet-client-certificate` flag.
|
||||
|
||||
Bind the `system:kube-apiserver-to-kubelet` ClusterRole to the `kubernetes` user:
|
||||
|
||||
```
|
||||
cat <<EOF | kubectl apply --kubeconfig admin.kubeconfig -f -
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: system:kube-apiserver
|
||||
namespace: ""
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: system:kube-apiserver-to-kubelet
|
||||
subjects:
|
||||
- apiGroup: rbac.authorization.k8s.io
|
||||
kind: User
|
||||
name: kubernetes
|
||||
EOF
|
||||
```
|
||||
> Remember to run the above commands on each controller node: `master-1` and `master-2`.
|
||||
|
||||
## The Kubernetes Frontend Load Balancer
|
||||
|
||||
In this section you will provision an external load balancer to front the Kubernetes API Servers. The `kubernetes-the-hard-way` static IP address will be attached to the resulting load balancer.
|
||||
|
||||
> The compute instances created in this tutorial will not have permission to complete this section. Run the following commands from the same machine used to create the compute instances.
|
||||
|
||||
|
||||
### Provision a Network Load Balancer
|
||||
|
||||
Create the external load balancer network resources:
|
||||
```
|
||||
# Install HAProxy
|
||||
sudo apt-get update && sudo apt-get install -y haproxy
|
||||
|
||||
```
|
||||
{
|
||||
KUBERNETES_PUBLIC_ADDRESS=$(gcloud compute addresses describe kubernetes-the-hard-way \
|
||||
--region $(gcloud config get-value compute/region) \
|
||||
--format 'value(address)')
|
||||
|
||||
gcloud compute http-health-checks create kubernetes \
|
||||
--description "Kubernetes Health Check" \
|
||||
--host "kubernetes.default.svc.cluster.local" \
|
||||
--request-path "/healthz"
|
||||
```
|
||||
cat <<EOF | sudo tee /etc/haproxy/haproxy.cfg
|
||||
frontend kubernetes
|
||||
bind 192.168.5.30:6443
|
||||
option tcplog
|
||||
mode tcp
|
||||
default_backend kubernetes-master-nodes
|
||||
|
||||
gcloud compute firewall-rules create kubernetes-the-hard-way-allow-health-check \
|
||||
--network kubernetes-the-hard-way \
|
||||
--source-ranges 209.85.152.0/22,209.85.204.0/22,35.191.0.0/16 \
|
||||
--allow tcp
|
||||
backend kubernetes-master-nodes
|
||||
mode tcp
|
||||
balance roundrobin
|
||||
option tcp-check
|
||||
server master-1 192.168.5.11:6443 check fall 3 rise 2
|
||||
server master-2 192.168.5.12:6443 check fall 3 rise 2
|
||||
EOF
|
||||
```
|
||||
|
||||
gcloud compute target-pools create kubernetes-target-pool \
|
||||
--http-health-check kubernetes
|
||||
|
||||
gcloud compute target-pools add-instances kubernetes-target-pool \
|
||||
--instances controller-0,controller-1,controller-2
|
||||
|
||||
gcloud compute forwarding-rules create kubernetes-forwarding-rule \
|
||||
--address ${KUBERNETES_PUBLIC_ADDRESS} \
|
||||
--ports 6443 \
|
||||
--region $(gcloud config get-value compute/region) \
|
||||
--target-pool kubernetes-target-pool
|
||||
}
|
||||
```
|
||||
sudo service haproxy start
|
||||
```
|
||||
|
||||
### Verification
|
||||
|
||||
Retrieve the `kubernetes-the-hard-way` static IP address:
|
||||
|
||||
```
|
||||
KUBERNETES_PUBLIC_ADDRESS=$(gcloud compute addresses describe kubernetes-the-hard-way \
|
||||
--region $(gcloud config get-value compute/region) \
|
||||
--format 'value(address)')
|
||||
```
|
||||
|
||||
Make an HTTP request for the Kubernetes version info:
|
||||
|
||||
```
|
||||
curl --cacert ca.pem https://${KUBERNETES_PUBLIC_ADDRESS}:6443/version
|
||||
curl https://192.168.5.30:6443/version -k
|
||||
```
|
||||
|
||||
> output
|
||||
|
@ -397,12 +256,12 @@ curl --cacert ca.pem https://${KUBERNETES_PUBLIC_ADDRESS}:6443/version
|
|||
```
|
||||
{
|
||||
"major": "1",
|
||||
"minor": "12",
|
||||
"gitVersion": "v1.12.0",
|
||||
"gitCommit": "0ed33881dc4355495f623c6f22e7dd0b7632b7c0",
|
||||
"minor": "13",
|
||||
"gitVersion": "v1.13.0",
|
||||
"gitCommit": "ddf47ac13c1a9483ea035a79cd7c10005ff21a6d",
|
||||
"gitTreeState": "clean",
|
||||
"buildDate": "2018-09-27T16:55:41Z",
|
||||
"goVersion": "go1.10.4",
|
||||
"buildDate": "2018-12-03T20:56:12Z",
|
||||
"goVersion": "go1.11.2",
|
||||
"compiler": "gc",
|
||||
"platform": "linux/amd64"
|
||||
}
|
||||
|
|
|
@ -1,44 +1,63 @@
|
|||
# Bootstrapping the Kubernetes Worker Nodes
|
||||
|
||||
In this lab you will bootstrap three Kubernetes worker nodes. The following components will be installed on each node: [runc](https://github.com/opencontainers/runc), [gVisor](https://github.com/google/gvisor), [container networking plugins](https://github.com/containernetworking/cni), [containerd](https://github.com/containerd/containerd), [kubelet](https://kubernetes.io/docs/admin/kubelet), and [kube-proxy](https://kubernetes.io/docs/concepts/cluster-administration/proxies).
|
||||
In this lab you will bootstrap 2 Kubernetes worker nodes. We already have [Docker](https://www.docker.com) installed on these nodes.
|
||||
|
||||
We will now install the following Kubernetes components:
|
||||
- [kubelet](https://kubernetes.io/docs/admin/kubelet)
|
||||
- [kube-proxy](https://kubernetes.io/docs/concepts/cluster-administration/proxies).
|
||||
|
||||
## Prerequisites
|
||||
|
||||
The commands in this lab must be run on each worker instance: `worker-0`, `worker-1`, and `worker-2`. Login to each worker instance using the `gcloud` command. Example:
|
||||
The commands in this lab must be run on the first worker instance: `worker-1`. Log in to it using an SSH terminal.
|
||||
|
||||
### Provisioning Kubelet Client Certificates
|
||||
|
||||
Kubernetes uses a [special-purpose authorization mode](https://kubernetes.io/docs/admin/authorization/node/) called Node Authorizer, that specifically authorizes API requests made by [Kubelets](https://kubernetes.io/docs/concepts/overview/components/#kubelet). In order to be authorized by the Node Authorizer, Kubelets must use a credential that identifies them as being in the `system:nodes` group, with a username of `system:node:<nodeName>`. In this section you will create a certificate for each Kubernetes worker node that meets the Node Authorizer requirements.
|
||||
|
||||
Generate a certificate and private key for one worker node:
|
||||
|
||||
For `worker-1`:
|
||||
|
||||
```
|
||||
gcloud compute ssh worker-0
|
||||
cat > openssl-worker-1.cnf <<EOF
|
||||
[req]
|
||||
req_extensions = v3_req
|
||||
distinguished_name = req_distinguished_name
|
||||
[req_distinguished_name]
|
||||
[ v3_req ]
|
||||
basicConstraints = CA:FALSE
|
||||
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
|
||||
subjectAltName = @alt_names
|
||||
[alt_names]
|
||||
DNS.1 = worker-1
|
||||
IP.1 = 192.168.5.11
|
||||
EOF
|
||||
|
||||
openssl genrsa -out worker-1.key 2048
|
||||
openssl req -new -key worker-1.key -subj "/CN=system:node:worker-1/O=system:nodes" -out worker-1.csr -config openssl-worker-1.cnf
|
||||
openssl x509 -req -in worker-1.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out worker-1.crt -extensions v3_req -extfile openssl-worker-1.cnf
|
||||
```
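An optional quick check that the resulting certificate satisfies the Node Authorizer requirements described above (the subject should show `O = system:nodes` and `CN = system:node:worker-1`):

```
openssl x509 -in worker-1.crt -noout -subject
```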
|
||||
|
||||
### Running commands in parallel with tmux
|
||||
|
||||
[tmux](https://github.com/tmux/tmux/wiki) can be used to run commands on multiple compute instances at the same time. See the [Running commands in parallel with tmux](01-prerequisites.md#running-commands-in-parallel-with-tmux) section in the Prerequisites lab.
|
||||
|
||||
## Provisioning a Kubernetes Worker Node
|
||||
|
||||
Install the OS dependencies:
|
||||
Results:
|
||||
|
||||
```
|
||||
{
|
||||
sudo apt-get update
|
||||
sudo apt-get -y install socat conntrack ipset
|
||||
}
|
||||
worker-1.key
|
||||
worker-1.crt
|
||||
```
|
||||
|
||||
> The socat binary enables support for the `kubectl port-forward` command.
|
||||
Copy the appropriate certificates and private keys to the worker node:
|
||||
|
||||
```
|
||||
scp ca.crt worker-1.crt worker-1.key worker-1:~/
|
||||
```
|
||||
|
||||
### Download and Install Worker Binaries
|
||||
|
||||
```
|
||||
wget -q --show-progress --https-only --timestamping \
|
||||
https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.12.0/crictl-v1.12.0-linux-amd64.tar.gz \
|
||||
https://storage.googleapis.com/kubernetes-the-hard-way/runsc-50c283b9f56bb7200938d9e207355f05f79f0d17 \
|
||||
https://github.com/opencontainers/runc/releases/download/v1.0.0-rc5/runc.amd64 \
|
||||
https://github.com/containernetworking/plugins/releases/download/v0.6.0/cni-plugins-amd64-v0.6.0.tgz \
|
||||
https://github.com/containerd/containerd/releases/download/v1.2.0-rc.0/containerd-1.2.0-rc.0.linux-amd64.tar.gz \
|
||||
https://storage.googleapis.com/kubernetes-release/release/v1.12.0/bin/linux/amd64/kubectl \
|
||||
https://storage.googleapis.com/kubernetes-release/release/v1.12.0/bin/linux/amd64/kube-proxy \
|
||||
https://storage.googleapis.com/kubernetes-release/release/v1.12.0/bin/linux/amd64/kubelet
|
||||
https://storage.googleapis.com/kubernetes-release/release/v1.13.0/bin/linux/amd64/kubectl \
|
||||
https://storage.googleapis.com/kubernetes-release/release/v1.13.0/bin/linux/amd64/kube-proxy \
|
||||
https://storage.googleapis.com/kubernetes-release/release/v1.13.0/bin/linux/amd64/kubelet
|
||||
```
|
||||
|
||||
Create the installation directories:
|
||||
|
@ -57,121 +76,18 @@ Install the worker binaries:
|
|||
|
||||
```
|
||||
{
|
||||
sudo mv runsc-50c283b9f56bb7200938d9e207355f05f79f0d17 runsc
|
||||
sudo mv runc.amd64 runc
|
||||
chmod +x kubectl kube-proxy kubelet runc runsc
|
||||
sudo mv kubectl kube-proxy kubelet runc runsc /usr/local/bin/
|
||||
sudo tar -xvf crictl-v1.12.0-linux-amd64.tar.gz -C /usr/local/bin/
|
||||
sudo tar -xvf cni-plugins-amd64-v0.6.0.tgz -C /opt/cni/bin/
|
||||
sudo tar -xvf containerd-1.2.0-rc.0.linux-amd64.tar.gz -C /
|
||||
chmod +x kubectl kube-proxy kubelet
|
||||
sudo mv kubectl kube-proxy kubelet /usr/local/bin/
|
||||
}
|
||||
```
|
||||
|
||||
### Configure CNI Networking
|
||||
|
||||
Retrieve the Pod CIDR range for the current compute instance:
|
||||
|
||||
```
|
||||
POD_CIDR=$(curl -s -H "Metadata-Flavor: Google" \
|
||||
http://metadata.google.internal/computeMetadata/v1/instance/attributes/pod-cidr)
|
||||
```
|
||||
|
||||
Create the `bridge` network configuration file:
|
||||
|
||||
```
|
||||
cat <<EOF | sudo tee /etc/cni/net.d/10-bridge.conf
|
||||
{
|
||||
"cniVersion": "0.3.1",
|
||||
"name": "bridge",
|
||||
"type": "bridge",
|
||||
"bridge": "cnio0",
|
||||
"isGateway": true,
|
||||
"ipMasq": true,
|
||||
"ipam": {
|
||||
"type": "host-local",
|
||||
"ranges": [
|
||||
[{"subnet": "${POD_CIDR}"}]
|
||||
],
|
||||
"routes": [{"dst": "0.0.0.0/0"}]
|
||||
}
|
||||
}
|
||||
EOF
|
||||
```
|
||||
|
||||
Create the `loopback` network configuration file:
|
||||
|
||||
```
|
||||
cat <<EOF | sudo tee /etc/cni/net.d/99-loopback.conf
|
||||
{
|
||||
"cniVersion": "0.3.1",
|
||||
"type": "loopback"
|
||||
}
|
||||
EOF
|
||||
```
|
||||
|
||||
### Configure containerd
|
||||
|
||||
Create the `containerd` configuration file:
|
||||
|
||||
```
|
||||
sudo mkdir -p /etc/containerd/
|
||||
```
|
||||
|
||||
```
|
||||
cat << EOF | sudo tee /etc/containerd/config.toml
|
||||
[plugins]
|
||||
[plugins.cri.containerd]
|
||||
snapshotter = "overlayfs"
|
||||
[plugins.cri.containerd.default_runtime]
|
||||
runtime_type = "io.containerd.runtime.v1.linux"
|
||||
runtime_engine = "/usr/local/bin/runc"
|
||||
runtime_root = ""
|
||||
[plugins.cri.containerd.untrusted_workload_runtime]
|
||||
runtime_type = "io.containerd.runtime.v1.linux"
|
||||
runtime_engine = "/usr/local/bin/runsc"
|
||||
runtime_root = "/run/containerd/runsc"
|
||||
[plugins.cri.containerd.gvisor]
|
||||
runtime_type = "io.containerd.runtime.v1.linux"
|
||||
runtime_engine = "/usr/local/bin/runsc"
|
||||
runtime_root = "/run/containerd/runsc"
|
||||
EOF
|
||||
```
|
||||
|
||||
> Untrusted workloads will be run using the gVisor (runsc) runtime.
|
||||
|
||||
Create the `containerd.service` systemd unit file:
|
||||
|
||||
```
|
||||
cat <<EOF | sudo tee /etc/systemd/system/containerd.service
|
||||
[Unit]
|
||||
Description=containerd container runtime
|
||||
Documentation=https://containerd.io
|
||||
After=network.target
|
||||
|
||||
[Service]
|
||||
ExecStartPre=/sbin/modprobe overlay
|
||||
ExecStart=/bin/containerd
|
||||
Restart=always
|
||||
RestartSec=5
|
||||
Delegate=yes
|
||||
KillMode=process
|
||||
OOMScoreAdjust=-999
|
||||
LimitNOFILE=1048576
|
||||
LimitNPROC=infinity
|
||||
LimitCORE=infinity
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
EOF
|
||||
```
|
||||
|
||||
### Configure the Kubelet
|
||||
|
||||
```
|
||||
{
|
||||
sudo mv ${HOSTNAME}-key.pem ${HOSTNAME}.pem /var/lib/kubelet/
|
||||
sudo mv ${HOSTNAME}.key ${HOSTNAME}.crt /var/lib/kubelet/
|
||||
sudo mv ${HOSTNAME}.kubeconfig /var/lib/kubelet/kubeconfig
|
||||
sudo mv ca.pem /var/lib/kubernetes/
|
||||
sudo mv ca.crt /var/lib/kubernetes/
|
||||
}
|
||||
```
|
||||
|
||||
|
@ -187,21 +103,18 @@ authentication:
|
|||
webhook:
|
||||
enabled: true
|
||||
x509:
|
||||
clientCAFile: "/var/lib/kubernetes/ca.pem"
|
||||
clientCAFile: "/var/lib/kubernetes/ca.crt"
|
||||
authorization:
|
||||
mode: Webhook
|
||||
clusterDomain: "cluster.local"
|
||||
clusterDNS:
|
||||
- "10.32.0.10"
|
||||
podCIDR: "${POD_CIDR}"
|
||||
- "10.96.0.10"
|
||||
resolvConf: "/run/systemd/resolve/resolv.conf"
|
||||
runtimeRequestTimeout: "15m"
|
||||
tlsCertFile: "/var/lib/kubelet/${HOSTNAME}.pem"
|
||||
tlsPrivateKeyFile: "/var/lib/kubelet/${HOSTNAME}-key.pem"
|
||||
EOF
|
||||
```
|
||||
|
||||
> The `resolvConf` configuration is used to avoid loops when using CoreDNS for service discovery on systems running `systemd-resolved`.
|
||||
> The `resolvConf` configuration is used to avoid loops when using CoreDNS for service discovery on systems running `systemd-resolved`.
|
||||
|
||||
Create the `kubelet.service` systemd unit file:
|
||||
|
||||
|
@ -210,16 +123,16 @@ cat <<EOF | sudo tee /etc/systemd/system/kubelet.service
|
|||
[Unit]
|
||||
Description=Kubernetes Kubelet
|
||||
Documentation=https://github.com/kubernetes/kubernetes
|
||||
After=containerd.service
|
||||
Requires=containerd.service
|
||||
After=docker.service
|
||||
Requires=docker.service
|
||||
|
||||
[Service]
|
||||
ExecStart=/usr/local/bin/kubelet \\
|
||||
--config=/var/lib/kubelet/kubelet-config.yaml \\
|
||||
--container-runtime=remote \\
|
||||
--container-runtime-endpoint=unix:///var/run/containerd/containerd.sock \\
|
||||
--image-pull-progress-deadline=2m \\
|
||||
--kubeconfig=/var/lib/kubelet/kubeconfig \\
|
||||
--tls-cert-file=/var/lib/kubelet/${HOSTNAME}.crt \\
|
||||
--tls-private-key-file=/var/lib/kubelet/${HOSTNAME}.key \\
|
||||
--network-plugin=cni \\
|
||||
--register-node=true \\
|
||||
--v=2
|
||||
|
@ -246,7 +159,7 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
|
|||
clientConnection:
|
||||
kubeconfig: "/var/lib/kube-proxy/kubeconfig"
|
||||
mode: "iptables"
|
||||
clusterCIDR: "10.200.0.0/16"
|
||||
clusterCIDR: "192.168.5.0/24"
|
||||
EOF
|
||||
```
|
||||
|
||||
|
@ -274,31 +187,30 @@ EOF
|
|||
```
|
||||
{
|
||||
sudo systemctl daemon-reload
|
||||
sudo systemctl enable containerd kubelet kube-proxy
|
||||
sudo systemctl start containerd kubelet kube-proxy
|
||||
sudo systemctl enable kubelet kube-proxy
|
||||
sudo systemctl start kubelet kube-proxy
|
||||
}
|
||||
```
|
||||
|
||||
> Remember to run the above commands on each worker node: `worker-0`, `worker-1`, and `worker-2`.
|
||||
> Remember to run the above commands on worker node: `worker-1`.
|
||||
|
||||
## Verification
|
||||
|
||||
> The compute instances created in this tutorial will not have permission to complete this section. Run the following commands from the same machine used to create the compute instances.
|
||||
|
||||
List the registered Kubernetes nodes:
|
||||
List the registered Kubernetes nodes from the master node:
|
||||
|
||||
```
|
||||
gcloud compute ssh controller-0 \
|
||||
--command "kubectl get nodes --kubeconfig admin.kubeconfig"
|
||||
master-1$ kubectl get nodes --kubeconfig admin.kubeconfig
|
||||
```
|
||||
|
||||
> output
|
||||
|
||||
```
|
||||
NAME STATUS ROLES AGE VERSION
|
||||
worker-0 Ready <none> 35s v1.12.0
|
||||
worker-1 Ready <none> 36s v1.12.0
|
||||
worker-2 Ready <none> 36s v1.12.0
|
||||
NAME STATUS ROLES AGE VERSION
|
||||
worker-1 NotReady <none> 93s v1.13.0
|
||||
```
|
||||
|
||||
> Note: It is OK for the worker node to be in a NotReady state.
|
||||
That is because we haven't configured networking yet.
|
||||
|
||||
Next: [Configuring kubectl for Remote Access](10-configuring-kubectl.md)
|
||||
|
|
|
@ -0,0 +1,401 @@
|
|||
# TLS Bootstrapping Worker Nodes
|
||||
|
||||
In the previous step we configured a worker node by:
|
||||
- Creating a set of key pairs for the worker node ourselves
|
||||
- Getting them signed by the CA ourselves
|
||||
- Creating a kube-config file using this certificate ourselves
|
||||
- Renewing the certificate ourselves, following the same process, every time it expires
|
||||
|
||||
This is not a practical approach when you have thousands of nodes in the cluster, with nodes being dynamically added to and removed from it. With TLS bootstrapping:
|
||||
|
||||
- The Nodes can generate certificate key pairs by themselves
|
||||
- The Nodes can generate certificate signing request by themselves
|
||||
- The Nodes can submit the certificate signing request to the Kubernetes CA (Using the Certificates API)
|
||||
- The Nodes can retrieve the signed certificate from the Kubernetes CA
|
||||
- The Nodes can generate a kube-config file using this certificate by themselves
|
||||
- The Nodes can start and join the cluster by themselves
|
||||
- The Nodes can renew certificates when they expire by themselves
|
||||
|
||||
So let's get started!
|
||||
|
||||
## What is required for TLS Bootstrapping
|
||||
|
||||
**Certificates API:** The Certificates API (as discussed in the lecture) provides a set of APIs on Kubernetes that help us manage certificates (create a CSR, get it signed by the CA, retrieve the signed certificate, etc.). The worker nodes (kubelets) can use this API to get certificates signed by the Kubernetes CA.
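To see the API resource this refers to, a quick look from `master-1` (output formatting may vary between kubectl versions):

```
kubectl api-resources --kubeconfig admin.kubeconfig | grep certificatesigningrequests
kubectl get csr --kubeconfig admin.kubeconfig
```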
|
||||
|
||||
## Prerequisites
|
||||
|
||||
**kube-apiserver** - Ensure bootstrap token-based authentication is enabled on the kube-apiserver.
|
||||
|
||||
`--enable-bootstrap-token-auth=true`
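A quick way to confirm the flag is already present (a sketch; the path assumes the kube-apiserver systemd unit created earlier in this guide):

```
grep "enable-bootstrap-token-auth" /etc/systemd/system/kube-apiserver.service
```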
|
||||
|
||||
**kube-controller-manager** - Certificate requests are ultimately signed by the kube-controller-manager, which requires the CA certificate and key to perform these operations.
|
||||
|
||||
```
|
||||
--cluster-signing-cert-file=/var/lib/kubernetes/ca.crt \\
|
||||
--cluster-signing-key-file=/var/lib/kubernetes/ca.key
|
||||
```
|
||||
|
||||
> Note: We have already configured these in our setup in this course.
|
||||
|
||||
Copy the CA certificate to the second worker node, `worker-2`:
|
||||
|
||||
```
|
||||
scp ca.crt worker-2:~/
|
||||
```
|
||||
|
||||
## Step 1 Configure the Binaries on the Worker node
|
||||
|
||||
### Download and Install Worker Binaries
|
||||
|
||||
```
|
||||
wget -q --show-progress --https-only --timestamping \
|
||||
https://storage.googleapis.com/kubernetes-release/release/v1.13.0/bin/linux/amd64/kubectl \
|
||||
https://storage.googleapis.com/kubernetes-release/release/v1.13.0/bin/linux/amd64/kube-proxy \
|
||||
https://storage.googleapis.com/kubernetes-release/release/v1.13.0/bin/linux/amd64/kubelet
|
||||
```
|
||||
|
||||
Create the installation directories:
|
||||
|
||||
```
|
||||
sudo mkdir -p \
|
||||
/etc/cni/net.d \
|
||||
/opt/cni/bin \
|
||||
/var/lib/kubelet \
|
||||
/var/lib/kube-proxy \
|
||||
/var/lib/kubernetes \
|
||||
/var/run/kubernetes
|
||||
```
|
||||
|
||||
Install the worker binaries:
|
||||
|
||||
```
|
||||
{
|
||||
chmod +x kubectl kube-proxy kubelet
|
||||
sudo mv kubectl kube-proxy kubelet /usr/local/bin/
|
||||
}
|
||||
```
|
||||
### Move the CA certificate
|
||||
|
||||
`sudo mv ca.crt /var/lib/kubernetes/`
|
||||
|
||||
## Step 2 Create the Bootstrap Token to be used by Nodes (Kubelets) to invoke the Certificates API
|
||||
|
||||
For the workers (kubelets) to access the Certificates API, they need to authenticate to the Kubernetes API server first. For this we create a [Bootstrap Token](https://kubernetes.io/docs/reference/access-authn-authz/bootstrap-tokens/) to be used by the kubelet.
|
||||
|
||||
Bootstrap Tokens take the form of a 6-character token ID followed by a 16-character token secret, separated by a dot, e.g. `abcdef.0123456789abcdef`. More formally, they must match the regular expression `[a-z0-9]{6}\.[a-z0-9]{16}`.
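This guide continues with a fixed token (`07401b.f395accd246ae52d`). If you would rather generate your own value matching that format, a sketch:

```
# sha256sum output is hex ([0-9a-f]), which satisfies [a-z0-9]
TOKEN_ID=$(head -c 16 /dev/urandom | sha256sum | head -c 6)
TOKEN_SECRET=$(head -c 16 /dev/urandom | sha256sum | head -c 16)
echo "${TOKEN_ID}.${TOKEN_SECRET}"
```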
|
||||
|
||||
Bootstrap Tokens are created as a secret in the kube-system namespace.
|
||||
|
||||
```
|
||||
cat > bootstrap-token-07401b.yaml <<EOF
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
# Name MUST be of form "bootstrap-token-<token id>"
|
||||
name: bootstrap-token-07401b
|
||||
namespace: kube-system
|
||||
|
||||
# Type MUST be 'bootstrap.kubernetes.io/token'
|
||||
type: bootstrap.kubernetes.io/token
|
||||
stringData:
|
||||
# Human readable description. Optional.
|
||||
description: "The default bootstrap token generated by 'kubeadm init'."
|
||||
|
||||
# Token ID and secret. Required.
|
||||
token-id: 07401b
|
||||
token-secret: f395accd246ae52d
|
||||
|
||||
# Expiration. Optional.
|
||||
expiration: 2021-03-10T03:22:11Z
|
||||
|
||||
# Allowed usages.
|
||||
usage-bootstrap-authentication: "true"
|
||||
usage-bootstrap-signing: "true"
|
||||
|
||||
# Extra groups to authenticate the token as. Must start with "system:bootstrappers:"
|
||||
auth-extra-groups: system:bootstrappers:worker
|
||||
EOF
|
||||
|
||||
|
||||
kubectl create -f bootstrap-token-07401b.yaml
|
||||
|
||||
```
|
||||
|
||||
Things to note:
|
||||
- **expiration** - make sure it is set to a date in the future.
|
||||
- **auth-extra-groups** - this is the group the worker nodes are part of. It must start with `system:bootstrappers:`. This group does not exist yet; it is associated with this token.
|
||||
|
||||
Once this is created, the token to be used for authentication is `07401b.f395accd246ae52d`.
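An optional check that the secret was created, run from `master-1`:

```
kubectl get secret bootstrap-token-07401b -n kube-system
```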
|
||||
|
||||
|
||||
## Step 3 Authorize workers (kubelets) to create CSRs
|
||||
|
||||
Next we bind the group we created above to the `system:node-bootstrapper` ClusterRole. This ClusterRole gives the group enough permissions to bootstrap the kubelet.
|
||||
|
||||
```
|
||||
kubectl create clusterrolebinding create-csrs-for-bootstrapping --clusterrole=system:node-bootstrapper --group=system:bootstrappers
|
||||
|
||||
--------------- OR ---------------
|
||||
|
||||
cat > csrs-for-bootstrapping.yaml <<EOF
|
||||
# enable bootstrapping nodes to create CSR
|
||||
kind: ClusterRoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: create-csrs-for-bootstrapping
|
||||
subjects:
|
||||
- kind: Group
|
||||
name: system:bootstrappers
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
roleRef:
|
||||
kind: ClusterRole
|
||||
name: system:node-bootstrapper
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
EOF
|
||||
|
||||
|
||||
kubectl create -f csrs-for-bootstrapping.yaml
|
||||
|
||||
```
|
||||
|
||||
## Step 4 Authorize workers (kubelets) to approve CSRs
|
||||
```
|
||||
kubectl create clusterrolebinding auto-approve-csrs-for-group --clusterrole=system:certificates.k8s.io:certificatesigningrequests:nodeclient --group=system:bootstrappers
|
||||
|
||||
--------------- OR ---------------
|
||||
|
||||
cat > auto-approve-csrs-for-group.yaml <<EOF
|
||||
# Approve all CSRs for the group "system:bootstrappers"
|
||||
kind: ClusterRoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: auto-approve-csrs-for-group
|
||||
subjects:
|
||||
- kind: Group
|
||||
name: system:bootstrappers
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
roleRef:
|
||||
kind: ClusterRole
|
||||
name: system:certificates.k8s.io:certificatesigningrequests:nodeclient
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
EOF
|
||||
|
||||
|
||||
kubectl create -f auto-approve-csrs-for-group.yaml
|
||||
```
|
||||
|
||||
## Step 5 Authorize workers (kubelets) to Auto Renew Certificates on Expiration
|
||||
|
||||
We now create the ClusterRoleBinding required for the nodes to automatically renew their certificates on expiry. Note that we are NOT using the **system:bootstrappers** group here any more, since by the time renewal is due the node is expected to be bootstrapped and part of the cluster already. All nodes are part of the **system:nodes** group.
|
||||
|
||||
```
|
||||
kubectl create clusterrolebinding auto-approve-renewals-for-nodes --clusterrole=system:certificates.k8s.io:certificatesigningrequests:selfnodeclient --group=system:nodes
|
||||
|
||||
--------------- OR ---------------
|
||||
|
||||
cat > auto-approve-renewals-for-nodes.yaml <<EOF
|
||||
# Approve renewal CSRs for the group "system:nodes"
|
||||
kind: ClusterRoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: auto-approve-renewals-for-nodes
|
||||
subjects:
|
||||
- kind: Group
|
||||
name: system:nodes
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
roleRef:
|
||||
kind: ClusterRole
|
||||
name: system:certificates.k8s.io:certificatesigningrequests:selfnodeclient
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
EOF
|
||||
|
||||
|
||||
kubectl create -f auto-approve-renewals-for-nodes.yaml
|
||||
```
|
||||
|
||||
## Step 6 Configure Kubelet to TLS Bootstrap
|
||||
|
||||
It is now time to configure the second worker to TLS bootstrap using the token we generated.
|
||||
|
||||
For worker-1 we started by creating a kubeconfig file with the TLS certificates that we manually generated.
|
||||
Here, we don't have the certificates yet, so we cannot create a kubeconfig file. Instead, we create a bootstrap-kubeconfig file with information about the token we created.
|
||||
|
||||
This is to be done on the `worker-2` node.
|
||||
|
||||
```
|
||||
kubectl config --kubeconfig=/var/lib/kubelet/bootstrap-kubeconfig set-cluster bootstrap --server='https://192.168.5.30:6443' --certificate-authority=/var/lib/kubernetes/ca.crt
|
||||
kubectl config --kubeconfig=/var/lib/kubelet/bootstrap-kubeconfig set-credentials kubelet-bootstrap --token=07401b.f395accd246ae52d
|
||||
kubectl config --kubeconfig=/var/lib/kubelet/bootstrap-kubeconfig set-context bootstrap --user=kubelet-bootstrap --cluster=bootstrap
|
||||
kubectl config --kubeconfig=/var/lib/kubelet/bootstrap-kubeconfig use-context bootstrap
|
||||
```
|
||||
|
||||
Or
|
||||
|
||||
```
|
||||
cat <<EOF | sudo tee /var/lib/kubelet/bootstrap-kubeconfig
|
||||
apiVersion: v1
|
||||
clusters:
|
||||
- cluster:
|
||||
certificate-authority: /var/lib/kubernetes/ca.crt
|
||||
server: https://192.168.5.30:6443
|
||||
name: bootstrap
|
||||
contexts:
|
||||
- context:
|
||||
cluster: bootstrap
|
||||
user: kubelet-bootstrap
|
||||
name: bootstrap
|
||||
current-context: bootstrap
|
||||
kind: Config
|
||||
preferences: {}
|
||||
users:
|
||||
- name: kubelet-bootstrap
|
||||
user:
|
||||
token: 07401b.f395accd246ae52d
|
||||
EOF
|
||||
```
|
||||
|
||||
## Step 7 Create Kubelet Config File
|
||||
|
||||
Create the `kubelet-config.yaml` configuration file:
|
||||
|
||||
```
|
||||
cat <<EOF | sudo tee /var/lib/kubelet/kubelet-config.yaml
|
||||
kind: KubeletConfiguration
|
||||
apiVersion: kubelet.config.k8s.io/v1beta1
|
||||
authentication:
|
||||
anonymous:
|
||||
enabled: false
|
||||
webhook:
|
||||
enabled: true
|
||||
x509:
|
||||
clientCAFile: "/var/lib/kubernetes/ca.crt"
|
||||
authorization:
|
||||
mode: Webhook
|
||||
clusterDomain: "cluster.local"
|
||||
clusterDNS:
|
||||
- "10.96.0.10"
|
||||
resolvConf: "/run/systemd/resolve/resolv.conf"
|
||||
runtimeRequestTimeout: "15m"
|
||||
EOF
|
||||
```
|
||||
|
||||
> Note: We are not specifying the certificate details - `tlsCertFile` and `tlsPrivateKeyFile` - in this file.
|
||||
|
||||
## Step 8 Configure Kubelet Service
|
||||
|
||||
Create the `kubelet.service` systemd unit file:
|
||||
|
||||
```
|
||||
cat <<EOF | sudo tee /etc/systemd/system/kubelet.service
|
||||
[Unit]
|
||||
Description=Kubernetes Kubelet
|
||||
Documentation=https://github.com/kubernetes/kubernetes
|
||||
After=docker.service
|
||||
Requires=docker.service
|
||||
|
||||
[Service]
|
||||
ExecStart=/usr/local/bin/kubelet \\
|
||||
--bootstrap-kubeconfig="/var/lib/kubelet/bootstrap-kubeconfig" \\
|
||||
--config=/var/lib/kubelet/kubelet-config.yaml \\
|
||||
--image-pull-progress-deadline=2m \\
|
||||
--kubeconfig=/var/lib/kubelet/kubeconfig \\
|
||||
--cert-dir=/var/lib/kubelet/pki/ \\
|
||||
--rotate-certificates=true \\
|
||||
--rotate-server-certificates=true \\
|
||||
--network-plugin=cni \\
|
||||
--register-node=true \\
|
||||
--v=2
|
||||
Restart=on-failure
|
||||
RestartSec=5
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
EOF
|
||||
```
|
||||
|
||||
Things to note here:
|
||||
- **bootstrap-kubeconfig**: Location of the bootstrap-kubeconfig file.
|
||||
- **cert-dir**: The directory where the generated certificates are stored.
|
||||
- **rotate-certificates**: Rotates client certificates when they expire.
|
||||
- **rotate-server-certificates**: Requests server certificates on bootstrap and rotates them when they expire.
|
||||
|
||||
## Step 9 Configure the Kubernetes Proxy
|
||||
|
||||
```
|
||||
sudo mv kube-proxy.kubeconfig /var/lib/kube-proxy/kubeconfig
|
||||
```
|
||||
|
||||
Create the `kube-proxy-config.yaml` configuration file:
|
||||
|
||||
```
|
||||
cat <<EOF | sudo tee /var/lib/kube-proxy/kube-proxy-config.yaml
|
||||
kind: KubeProxyConfiguration
|
||||
apiVersion: kubeproxy.config.k8s.io/v1alpha1
|
||||
clientConnection:
|
||||
kubeconfig: "/var/lib/kube-proxy/kubeconfig"
|
||||
mode: "iptables"
|
||||
clusterCIDR: "192.168.5.0/24"
|
||||
EOF
|
||||
```
|
||||
|
||||
Create the `kube-proxy.service` systemd unit file:
|
||||
|
||||
```
|
||||
cat <<EOF | sudo tee /etc/systemd/system/kube-proxy.service
|
||||
[Unit]
|
||||
Description=Kubernetes Kube Proxy
|
||||
Documentation=https://github.com/kubernetes/kubernetes
|
||||
|
||||
[Service]
|
||||
ExecStart=/usr/local/bin/kube-proxy \\
|
||||
--config=/var/lib/kube-proxy/kube-proxy-config.yaml
|
||||
Restart=on-failure
|
||||
RestartSec=5
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
EOF
|
||||
```
|
||||
|
||||
## Step 10 Start the Worker Services
|
||||
|
||||
```
|
||||
{
|
||||
sudo systemctl daemon-reload
|
||||
sudo systemctl enable kubelet kube-proxy
|
||||
sudo systemctl start kubelet kube-proxy
|
||||
}
|
||||
```
|
||||
> Remember to run the above commands on worker node: `worker-2`.
|
||||
|
||||
|
||||
## Step 11 Approve Server CSR
|
||||
|
||||
`kubectl get csr`
|
||||
|
||||
```
|
||||
NAME AGE REQUESTOR CONDITION
|
||||
csr-95bv6 20s system:node:worker-2 Pending
|
||||
```
|
||||
|
||||
|
||||
Approve the pending CSR:
|
||||
|
||||
`kubectl certificate approve csr-95bv6`
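Once the CSR is approved, the kubelet on `worker-2` writes the signed certificates into the directory given by `--cert-dir`; a quick way to check, run on `worker-2`:

```
ls -l /var/lib/kubelet/pki/
```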
|
||||
|
||||
|
||||
## Verification
|
||||
|
||||
List the registered Kubernetes nodes from the master node:
|
||||
|
||||
```
|
||||
master-1$ kubectl get nodes --kubeconfig admin.kubeconfig
|
||||
```
|
||||
|
||||
> output
|
||||
|
||||
```
|
||||
NAME STATUS ROLES AGE VERSION
|
||||
worker-1 Ready <none> 12h v1.13.0
|
||||
worker-2 Ready <none> 37m v1.13.0
|
||||
```
|
|
@ -12,18 +12,16 @@ Generate a kubeconfig file suitable for authenticating as the `admin` user:
|
|||
|
||||
```
|
||||
{
|
||||
KUBERNETES_PUBLIC_ADDRESS=$(gcloud compute addresses describe kubernetes-the-hard-way \
|
||||
--region $(gcloud config get-value compute/region) \
|
||||
--format 'value(address)')
|
||||
KUBERNETES_LB_ADDRESS=192.168.5.30
|
||||
|
||||
kubectl config set-cluster kubernetes-the-hard-way \
|
||||
--certificate-authority=ca.pem \
|
||||
--certificate-authority=ca.crt \
|
||||
--embed-certs=true \
|
||||
--server=https://${KUBERNETES_PUBLIC_ADDRESS}:6443
|
||||
--server=https://${KUBERNETES_LB_ADDRESS}:6443
|
||||
|
||||
kubectl config set-credentials admin \
|
||||
--client-certificate=admin.pem \
|
||||
--client-key=admin-key.pem
|
||||
--client-certificate=admin.crt \
|
||||
--client-key=admin.key
|
||||
|
||||
kubectl config set-context kubernetes-the-hard-way \
|
||||
--cluster=kubernetes-the-hard-way \
|
||||
|
@ -48,7 +46,6 @@ NAME STATUS MESSAGE ERROR
|
|||
controller-manager Healthy ok
|
||||
scheduler Healthy ok
|
||||
etcd-1 Healthy {"health":"true"}
|
||||
etcd-2 Healthy {"health":"true"}
|
||||
etcd-0 Healthy {"health":"true"}
|
||||
```
|
||||
|
||||
|
@ -62,9 +59,8 @@ kubectl get nodes
|
|||
|
||||
```
|
||||
NAME STATUS ROLES AGE VERSION
|
||||
worker-0 Ready <none> 117s v1.12.0
|
||||
worker-1 Ready <none> 118s v1.12.0
|
||||
worker-2 Ready <none> 118s v1.12.0
|
||||
worker-1 Ready <none> 118s v1.13.0
|
||||
worker-2 Ready <none> 118s v1.13.0
|
||||
```
|
||||
|
||||
Next: [Provisioning Pod Network Routes](11-pod-network-routes.md)
|
||||
Next: [Deploy Pod Networking](12-configure-pod-networking.md)
|
|
@ -1,60 +0,0 @@
|
|||
# Provisioning Pod Network Routes
|
||||
|
||||
Pods scheduled to a node receive an IP address from the node's Pod CIDR range. At this point pods can not communicate with other pods running on different nodes due to missing network [routes](https://cloud.google.com/compute/docs/vpc/routes).
|
||||
|
||||
In this lab you will create a route for each worker node that maps the node's Pod CIDR range to the node's internal IP address.
|
||||
|
||||
> There are [other ways](https://kubernetes.io/docs/concepts/cluster-administration/networking/#how-to-achieve-this) to implement the Kubernetes networking model.
|
||||
|
||||
## The Routing Table
|
||||
|
||||
In this section you will gather the information required to create routes in the `kubernetes-the-hard-way` VPC network.
|
||||
|
||||
Print the internal IP address and Pod CIDR range for each worker instance:
|
||||
|
||||
```
|
||||
for instance in worker-0 worker-1 worker-2; do
|
||||
gcloud compute instances describe ${instance} \
|
||||
--format 'value[separator=" "](networkInterfaces[0].networkIP,metadata.items[0].value)'
|
||||
done
|
||||
```
|
||||
|
||||
> output
|
||||
|
||||
```
|
||||
10.240.0.20 10.200.0.0/24
|
||||
10.240.0.21 10.200.1.0/24
|
||||
10.240.0.22 10.200.2.0/24
|
||||
```
|
||||
|
||||
## Routes
|
||||
|
||||
Create network routes for each worker instance:
|
||||
|
||||
```
|
||||
for i in 0 1 2; do
|
||||
gcloud compute routes create kubernetes-route-10-200-${i}-0-24 \
|
||||
--network kubernetes-the-hard-way \
|
||||
--next-hop-address 10.240.0.2${i} \
|
||||
--destination-range 10.200.${i}.0/24
|
||||
done
|
||||
```
|
||||
|
||||
List the routes in the `kubernetes-the-hard-way` VPC network:
|
||||
|
||||
```
|
||||
gcloud compute routes list --filter "network: kubernetes-the-hard-way"
|
||||
```
|
||||
|
||||
> output
|
||||
|
||||
```
|
||||
NAME NETWORK DEST_RANGE NEXT_HOP PRIORITY
|
||||
default-route-081879136902de56 kubernetes-the-hard-way 10.240.0.0/24 kubernetes-the-hard-way 1000
|
||||
default-route-55199a5aa126d7aa kubernetes-the-hard-way 0.0.0.0/0 default-internet-gateway 1000
|
||||
kubernetes-route-10-200-0-0-24 kubernetes-the-hard-way 10.200.0.0/24 10.240.0.20 1000
|
||||
kubernetes-route-10-200-1-0-24 kubernetes-the-hard-way 10.200.1.0/24 10.240.0.21 1000
|
||||
kubernetes-route-10-200-2-0-24 kubernetes-the-hard-way 10.200.2.0/24 10.240.0.22 1000
|
||||
```
|
||||
|
||||
Next: [Deploying the DNS Cluster Add-on](12-dns-addon.md)
|
|
@ -0,0 +1,30 @@
|
|||
# Provisioning Pod Network
|
||||
|
||||
We chose [Weave Net](https://www.weave.works/docs/net/latest/kubernetes/kube-addon/) as our CNI networking option.
|
||||
|
||||
Download the CNI plugins required for Weave on each of the worker nodes - `worker-1` and `worker-2`:
|
||||
`wget https://github.com/containernetworking/plugins/releases/download/v0.7.5/cni-plugins-amd64-v0.7.5.tgz`
|
||||
|
||||
Extract it to the `/opt/cni/bin` directory:
|
||||
`sudo tar -xzvf cni-plugins-amd64-v0.7.5.tgz --directory /opt/cni/bin/`
|
||||
|
||||
|
||||
Deploy the Weave network. Run this only once, on the `master` node:
|
||||
`kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')"`
|
||||
|
||||
Weave uses a pod CIDR of `10.32.0.0/12` by default.
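If you need a different pod CIDR, the Weave kube-addon URL accepts an `IPALLOC_RANGE` environment override; a hedged sketch (the range below is only an example; see the Weave documentation linked above for details):

```
kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')&env.IPALLOC_RANGE=10.200.0.0/16"
```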
|
||||
|
||||
## Verification
|
||||
|
||||
List the Weave pods in the `kube-system` namespace from the master node:
|
||||
|
||||
```
|
||||
master-1$ kubectl get pods -n kube-system
|
||||
```
|
||||
|
||||
> output
|
||||
|
||||
```
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
weave-net-cfzk5 2/2 Running 0 41m
|
||||
```
|
|
@ -7,7 +7,7 @@ In this lab you will deploy the [DNS add-on](https://kubernetes.io/docs/concepts
|
|||
Deploy the `coredns` cluster add-on:
|
||||
|
||||
```
|
||||
kubectl apply -f https://storage.googleapis.com/kubernetes-the-hard-way/coredns.yaml
|
||||
kubectl apply -f deployments/kube-dns.yaml
|
||||
```
|
||||
|
||||
> output
|
||||
|
@ -71,11 +71,11 @@ kubectl exec -ti $POD_NAME -- nslookup kubernetes
|
|||
> output
|
||||
|
||||
```
|
||||
Server: 10.32.0.10
|
||||
Address 1: 10.32.0.10 kube-dns.kube-system.svc.cluster.local
|
||||
Server: 10.96.0.10
|
||||
Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local
|
||||
|
||||
Name: kubernetes
|
||||
Address 1: 10.32.0.1 kubernetes.default.svc.cluster.local
|
||||
Address 1: 10.96.0.1 kubernetes.default.svc.cluster.local
|
||||
```
|
||||
|
||||
Next: [Smoke Test](13-smoke-test.md)
|
|
@ -1,55 +0,0 @@
|
|||
# Cleaning Up
|
||||
|
||||
In this lab you will delete the compute resources created during this tutorial.
|
||||
|
||||
## Compute Instances
|
||||
|
||||
Delete the controller and worker compute instances:
|
||||
|
||||
```
|
||||
gcloud -q compute instances delete \
|
||||
controller-0 controller-1 controller-2 \
|
||||
worker-0 worker-1 worker-2
|
||||
```
|
||||
|
||||
## Networking
|
||||
|
||||
Delete the external load balancer network resources:
|
||||
|
||||
```
|
||||
{
|
||||
gcloud -q compute forwarding-rules delete kubernetes-forwarding-rule \
|
||||
--region $(gcloud config get-value compute/region)
|
||||
|
||||
gcloud -q compute target-pools delete kubernetes-target-pool
|
||||
|
||||
gcloud -q compute http-health-checks delete kubernetes
|
||||
|
||||
gcloud -q compute addresses delete kubernetes-the-hard-way
|
||||
}
|
||||
```
|
||||
|
||||
Delete the `kubernetes-the-hard-way` firewall rules:
|
||||
|
||||
```
|
||||
gcloud -q compute firewall-rules delete \
|
||||
kubernetes-the-hard-way-allow-nginx-service \
|
||||
kubernetes-the-hard-way-allow-internal \
|
||||
kubernetes-the-hard-way-allow-external \
|
||||
kubernetes-the-hard-way-allow-health-check
|
||||
```
|
||||
|
||||
Delete the `kubernetes-the-hard-way` network VPC:
|
||||
|
||||
```
|
||||
{
|
||||
gcloud -q compute routes delete \
|
||||
kubernetes-route-10-200-0-0-24 \
|
||||
kubernetes-route-10-200-1-0-24 \
|
||||
kubernetes-route-10-200-2-0-24
|
||||
|
||||
gcloud -q compute networks subnets delete kubernetes
|
||||
|
||||
gcloud -q compute networks delete kubernetes-the-hard-way
|
||||
}
|
||||
```
|
|
@ -0,0 +1,54 @@
|
|||
## RBAC for Kubelet Authorization
|
||||
|
||||
In this section you will configure RBAC permissions to allow the Kubernetes API Server to access the Kubelet API on each worker node. Access to the Kubelet API is required for retrieving metrics, logs, and executing commands in pods.
|
||||
|
||||
> This tutorial sets the Kubelet `--authorization-mode` flag to `Webhook`. Webhook mode uses the [SubjectAccessReview](https://kubernetes.io/docs/admin/authorization/#checking-api-access) API to determine authorization.
|
||||
|
||||
|
||||
Create the `system:kube-apiserver-to-kubelet` [ClusterRole](https://kubernetes.io/docs/admin/authorization/rbac/#role-and-clusterrole) with permissions to access the Kubelet API and perform most common tasks associated with managing pods:
|
||||
|
||||
```
|
||||
cat <<EOF | kubectl apply --kubeconfig admin.kubeconfig -f -
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
annotations:
|
||||
rbac.authorization.kubernetes.io/autoupdate: "true"
|
||||
labels:
|
||||
kubernetes.io/bootstrapping: rbac-defaults
|
||||
name: system:kube-apiserver-to-kubelet
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- nodes/proxy
|
||||
- nodes/stats
|
||||
- nodes/log
|
||||
- nodes/spec
|
||||
- nodes/metrics
|
||||
verbs:
|
||||
- "*"
|
||||
EOF
|
||||
```
|
||||
|
||||
The Kubernetes API Server authenticates to the Kubelet as the `kube-apiserver` user using the client certificate as defined by the `--kubelet-client-certificate` flag.
|
||||
|
||||
Bind the `system:kube-apiserver-to-kubelet` ClusterRole to the `kube-apiserver` user:
|
||||
|
||||
```
|
||||
cat <<EOF | kubectl apply --kubeconfig admin.kubeconfig -f -
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: system:kube-apiserver
|
||||
namespace: ""
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: system:kube-apiserver-to-kubelet
|
||||
subjects:
|
||||
- apiGroup: rbac.authorization.k8s.io
|
||||
kind: User
|
||||
name: kube-apiserver
|
||||
EOF
|
||||
```
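Once a pod is running (see the smoke test), the effect of this binding can be exercised with commands that go through the Kubelet API. The pod name below is only a placeholder:

```
# Hypothetical pod name; substitute one that actually exists in your cluster
kubectl logs busybox --kubeconfig admin.kubeconfig
kubectl exec -it busybox --kubeconfig admin.kubeconfig -- hostname
```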

@@ -16,13 +16,12 @@ kubectl create secret generic kubernetes-the-hard-way \

Print a hexdump of the `kubernetes-the-hard-way` secret stored in etcd:

```
gcloud compute ssh controller-0 \
  --command "sudo ETCDCTL_API=3 etcdctl get \
sudo ETCDCTL_API=3 etcdctl get \
  --endpoints=https://127.0.0.1:2379 \
  --cacert=/etc/etcd/ca.pem \
  --cert=/etc/etcd/kubernetes.pem \
  --key=/etc/etcd/kubernetes-key.pem \
  /registry/secrets/default/kubernetes-the-hard-way | hexdump -C"
  --cacert=/etc/etcd/ca.crt \
  --cert=/etc/etcd/etcd-server.crt \
  --key=/etc/etcd/etcd-server.key \
  /registry/secrets/default/kubernetes-the-hard-way | hexdump -C
```

> output

@@ -32,22 +31,25 @@ gcloud compute ssh controller-0 \
00000010 73 2f 64 65 66 61 75 6c 74 2f 6b 75 62 65 72 6e |s/default/kubern|
00000020 65 74 65 73 2d 74 68 65 2d 68 61 72 64 2d 77 61 |etes-the-hard-wa|
00000030 79 0a 6b 38 73 3a 65 6e 63 3a 61 65 73 63 62 63 |y.k8s:enc:aescbc|
00000040 3a 76 31 3a 6b 65 79 31 3a dd 3f 36 6c ce 65 9d |:v1:key1:.?6l.e.|
00000050 b3 b1 46 1a ba ae a2 1f e4 fa 13 0c 4b 6e 2c 3c |..F.........Kn,<|
00000060 15 fa 88 56 84 b7 aa c0 7a ca 66 f3 de db 2b a3 |...V....z.f...+.|
00000070 88 dc b1 b1 d8 2f 16 3e 6b 4a cb ac 88 5d 23 2d |...../.>kJ...]#-|
00000080 99 62 be 72 9f a5 01 38 15 c4 43 ac 38 5f ef 88 |.b.r...8..C.8_..|
00000090 3b 88 c1 e6 b6 06 4f ae a8 6b c8 40 70 ac 0a d3 |;.....O..k.@p...|
000000a0 3e dc 2b b6 0f 01 b6 8b e2 21 29 4d 32 d6 67 a6 |>.+......!)M2.g.|
000000b0 4e 6d bb 61 0d 85 22 ea f4 d6 2d 0a af 3c 71 85 |Nm.a.."...-..<q.|
000000c0 96 27 c9 ec 90 e3 56 8c 94 a7 1c 9a 0e 00 28 11 |.'....V.......(.|
000000d0 18 28 f4 33 42 d9 57 d9 e3 e9 1c 38 e3 bc 1e c3 |.(.3B.W....8....|
000000e0 d2 47 f3 20 60 be b8 57 a7 0a |.G. `..W..|
00000040 3a 76 31 3a 6b 65 79 31 3a 78 cd 3c 33 3a 60 d7 |:v1:key1:x.<3:`.|
00000050 4c 1e 4c f1 97 ce 75 6f 3d a7 f1 4b 59 e8 f9 2a |L.L...uo=..KY..*|
00000060 17 77 20 14 ab 73 85 63 12 12 a4 8d 3c 6e 04 4c |.w ..s.c....<n.L|
00000070 e0 84 6f 10 7b 3a 13 10 d0 cd df 81 d0 08 be fa |..o.{:..........|
00000080 ea 74 ca 53 b3 b2 90 95 e1 ba bc 3f 88 76 db 8e |.t.S.......?.v..|
00000090 e1 1e 17 ea 0d b0 3b e3 e3 df eb 2e 57 76 1d d0 |......;.....Wv..|
000000a0 25 ca ee 5b f2 27 c7 f2 8e 58 93 e9 28 45 8f 3a |%..[.'...X..(E.:|
000000b0 e7 97 bf 74 86 72 fd e7 f1 bb fc f7 2d 10 4d c3 |...t.r......-.M.|
000000c0 70 1d 08 75 c3 7c 14 55 18 9d 68 73 ec e3 41 3a |p..u.|.U..hs..A:|
000000d0 dc 41 8a 4b 9e 33 d9 3d c0 04 60 10 cf ad a4 88 |.A.K.3.=..`.....|
000000e0 7b e7 93 3f 7a e8 1b 22 bf 0a |{..?z.."..|
000000ea
```

The etcd key should be prefixed with `k8s:enc:aescbc:v1:key1`, which indicates the `aescbc` provider was used to encrypt the data with the `key1` encryption key.
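
For context, this prefix comes from the encryption configuration supplied to `kube-apiserver` through its encryption provider config flag. A minimal sketch of such a file, assuming the key material is held in an `ENCRYPTION_KEY` variable (the exact file used in this setup may differ):

```
kind: EncryptionConfig
apiVersion: v1
resources:
  - resources:
      - secrets
    providers:
      - aescbc:
          keys:
            - name: key1
              secret: ${ENCRYPTION_KEY}
      - identity: {}
```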

Cleanup:
`kubectl delete secret kubernetes-the-hard-way`

## Deployments

In this section you will verify the ability to create and manage [Deployments](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/).
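
As a minimal sketch of what this section exercises (the deployment name and image are just examples):

```
kubectl create deployment nginx --image=nginx
kubectl get pods -l app=nginx
```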

@@ -0,0 +1,34 @@

# Run End-to-End Tests

## Install Go

```
wget https://dl.google.com/go/go1.12.1.linux-amd64.tar.gz

sudo tar -C /usr/local -xzf go1.12.1.linux-amd64.tar.gz
export GOPATH="/home/vagrant/go"
export PATH=$PATH:/usr/local/go/bin:$GOPATH/bin
```
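
Optionally verify the toolchain is on the `PATH` (it should print go1.12.1 for the tarball above):

```
go version
```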

## Install kubetest

```
sudo go get -v -u k8s.io/test-infra/kubetest
```

> Note: This may take a few minutes depending on your network speed

## Extract the Version

```
kubetest --extract=v1.13.0

cd kubernetes

export KUBE_MASTER_IP="192.168.5.11:6443"

export KUBE_MASTER=master-1

kubetest --test --provider=skeleton --test_args="--ginkgo.focus=\[Conformance\]" | tee test.out
```
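
The conformance run can take a long time and the full Ginkgo output is captured in `test.out`. One convenience (not part of the original steps) is to grep for failed specs and the final summary; the exact strings depend on the Ginkgo version in use:

```
grep -E "\[Fail\]|Test Suite" test.out
```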

@@ -0,0 +1,47 @@

# Dynamic Kubelet Configuration

`sudo apt install -y jq`

```
NODE_NAME="worker-1"; curl -sSL "https://localhost:6443/api/v1/nodes/${NODE_NAME}/proxy/configz" -k --cert admin.crt --key admin.key | jq '.kubeletconfig|.kind="KubeletConfiguration"|.apiVersion="kubelet.config.k8s.io/v1beta1"' > kubelet_configz_${NODE_NAME}
```
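
A quick, optional sanity check that the download produced a usable KubeletConfiguration document:

```
jq '.kind, .apiVersion' kubelet_configz_${NODE_NAME}
```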

```
kubectl -n kube-system create configmap nodes-config --from-file=kubelet=kubelet_configz_${NODE_NAME} --append-hash -o yaml
```

Edit the node to use the dynamically created configuration:

```
kubectl edit node worker-2
```
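
In the editor, the node is pointed at the new ConfigMap by adding a `configSource` stanza under `spec`. A sketch is below; the ConfigMap name must include the hash suffix that `--append-hash` generated, so the name shown here is a placeholder:

```
spec:
  configSource:
    configMap:
      name: nodes-config-<hash>
      namespace: kube-system
      kubeletConfigKey: kubelet
```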

## Configure Kubelet Service

Create the `kubelet.service` systemd unit file:

```
cat <<EOF | sudo tee /etc/systemd/system/kubelet.service
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/kubernetes/kubernetes
After=docker.service
Requires=docker.service

[Service]
ExecStart=/usr/local/bin/kubelet \\
  --bootstrap-kubeconfig="/var/lib/kubelet/bootstrap-kubeconfig" \\
  --image-pull-progress-deadline=2m \\
  --kubeconfig=/var/lib/kubelet/kubeconfig \\
  --dynamic-config-dir=/var/lib/kubelet/dynamic-config \\
  --cert-dir=/var/lib/kubelet/ \\
  --network-plugin=cni \\
  --register-node=true \\
  --v=2
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF
```
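
After writing the unit file, reload systemd and restart the Kubelet so the new flags, including `--dynamic-config-dir`, take effect:

```
{
  sudo systemctl daemon-reload
  sudo systemctl restart kubelet
}
```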

@@ -0,0 +1,21 @@

# Differences between the original and this solution

Platform: I use VirtualBox to set up a local cluster; the original uses GCP.

Nodes: 2 master and 2 worker nodes vs 3 master and 3 worker nodes.

Worker setup: the first worker node is configured normally, and the second one is bootstrapped via TLS bootstrapping.

Node names: I use worker-1 and worker-2 instead of worker-0 and worker-1.

IP addresses: I use statically assigned IPs on a private network.

Certificate file names: I use <name>.crt for the public certificate and <name>.key for the private key, whereas the original uses <name>.pem for the certificate and <name>-key.pem for the private key.

etcd: I generate a separate certificate for etcd-server instead of reusing the kube-apiserver certificate.

Network: We use Weave Net.

E2E tests: steps to run the end-to-end (conformance) tests are added.

@@ -0,0 +1,120 @@

# -*- mode: ruby -*-
# vi:set ft=ruby sw=2 ts=2 sts=2:

# Define the number of master and worker nodes
# If this number is changed, remember to update setup-hosts.sh script with the new hosts IP details in /etc/hosts of each VM.
NUM_MASTER_NODE = 2
NUM_WORKER_NODE = 2

IP_NW = "192.168.5."
MASTER_IP_START = 10
NODE_IP_START = 20
LB_IP_START = 30

# All Vagrant configuration is done below. The "2" in Vagrant.configure
# configures the configuration version (we support older styles for
# backwards compatibility). Please don't change it unless you know what
# you're doing.
Vagrant.configure("2") do |config|
  # The most common configuration options are documented and commented below.
  # For a complete reference, please see the online documentation at
  # https://docs.vagrantup.com.

  # Every Vagrant development environment requires a box. You can search for
  # boxes at https://vagrantcloud.com/search.
  # config.vm.box = "base"
  config.vm.box = "ubuntu/bionic64"

  # Disable automatic box update checking. If you disable this, then
  # boxes will only be checked for updates when the user runs
  # `vagrant box outdated`. This is not recommended.
  config.vm.box_check_update = false

  # Create a public network, which generally matches to bridged network.
  # Bridged networks make the machine appear as another physical device on
  # your network.
  # config.vm.network "public_network"

  # Share an additional folder to the guest VM. The first argument is
  # the path on the host to the actual folder. The second argument is
  # the path on the guest to mount the folder. And the optional third
  # argument is a set of non-required options.
  # config.vm.synced_folder "../data", "/vagrant_data"

  # Provider-specific configuration so you can fine-tune various
  # backing providers for Vagrant. These expose provider-specific options.
  # Example for VirtualBox:
  #
  # config.vm.provider "virtualbox" do |vb|
  #   # Customize the amount of memory on the VM:
  #   vb.memory = "1024"
  # end
  #
  # View the documentation for the provider you are using for more
  # information on available options.

  # Provision Master Nodes
  (1..NUM_MASTER_NODE).each do |i|
    config.vm.define "master-#{i}" do |node|
      # Name shown in the GUI
      node.vm.provider "virtualbox" do |vb|
        vb.name = "kubernetes-ha-master-#{i}"
        vb.memory = 2048
        vb.cpus = 2
      end
      node.vm.hostname = "master-#{i}"
      node.vm.network :private_network, ip: IP_NW + "#{MASTER_IP_START + i}"
      node.vm.network "forwarded_port", guest: 22, host: "#{2710 + i}"

      node.vm.provision "setup-hosts", :type => "shell", :path => "ubuntu/vagrant/setup-hosts.sh" do |s|
        s.args = ["enp0s8"]
      end

      node.vm.provision "allow-bridge-nf-traffic", :type => "shell", :path => "ubuntu/allow-bridge-nf-traffic.sh"
      node.vm.provision "setup-dns", type: "shell", :path => "ubuntu/update-dns.sh"

    end
  end

  # Provision Load Balancer Node
  config.vm.define "loadbalancer" do |node|
    node.vm.provider "virtualbox" do |vb|
      vb.name = "kubernetes-ha-lb"
      vb.memory = 512
      vb.cpus = 1
    end
    node.vm.hostname = "loadbalancer"
    node.vm.network :private_network, ip: IP_NW + "#{LB_IP_START}"
    node.vm.network "forwarded_port", guest: 22, host: 2730

    node.vm.provision "setup-hosts", :type => "shell", :path => "ubuntu/vagrant/setup-hosts.sh" do |s|
      s.args = ["enp0s8"]
    end

    node.vm.provision "setup-dns", type: "shell", :path => "ubuntu/update-dns.sh"

  end

  # Provision Worker Nodes
  (1..NUM_WORKER_NODE).each do |i|
    config.vm.define "worker-#{i}" do |node|
      node.vm.provider "virtualbox" do |vb|
        vb.name = "kubernetes-ha-worker-#{i}"
        vb.memory = 512
        vb.cpus = 1
      end
      node.vm.hostname = "worker-#{i}"
      node.vm.network :private_network, ip: IP_NW + "#{NODE_IP_START + i}"
      node.vm.network "forwarded_port", guest: 22, host: "#{2720 + i}"

      node.vm.provision "setup-hosts", :type => "shell", :path => "ubuntu/vagrant/setup-hosts.sh" do |s|
        s.args = ["enp0s8"]
      end

      node.vm.provision "allow-bridge-nf-traffic", :type => "shell", :path => "ubuntu/allow-bridge-nf-traffic.sh"
      node.vm.provision "setup-dns", type: "shell", :path => "ubuntu/update-dns.sh"
      node.vm.provision "install-docker", type: "shell", :path => "ubuntu/install-docker.sh"

    end
  end
end

@@ -0,0 +1,2 @@

#!/bin/bash
sysctl net.bridge.bridge-nf-call-iptables=1

@@ -0,0 +1,14 @@

#!/bin/bash
apt-get update \
  && apt-get install -y \
    apt-transport-https \
    ca-certificates \
    curl \
    software-properties-common \
  && curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - \
  && add-apt-repository \
    "deb https://download.docker.com/linux/$(. /etc/os-release; echo "$ID") \
    $(lsb_release -cs) \
    stable" \
  && apt-get update \
  && apt-get install -y docker-ce=$(apt-cache madison docker-ce | grep 18.06 | head -1 | awk '{print $3}')

@@ -0,0 +1,5 @@

#!/bin/bash

sed -i -e 's/#DNS=/DNS=8.8.8.8/' /etc/systemd/resolved.conf

service systemd-resolved restart

@@ -0,0 +1,14 @@

#!/bin/bash
GUEST_ADDITION_VERSION=5.2.4
GUEST_ADDITION_ISO=VBoxGuestAdditions_${GUEST_ADDITION_VERSION}.iso
GUEST_ADDITION_MOUNT=/media/VBoxGuestAdditions

apt-get install linux-headers-$(uname -r) build-essential dkms

wget http://download.virtualbox.org/virtualbox/${GUEST_ADDITION_VERSION}/${GUEST_ADDITION_ISO}
mkdir -p ${GUEST_ADDITION_MOUNT}
mount -o loop,ro ${GUEST_ADDITION_ISO} ${GUEST_ADDITION_MOUNT}
sh ${GUEST_ADDITION_MOUNT}/VBoxLinuxAdditions.run
rm ${GUEST_ADDITION_ISO}
umount ${GUEST_ADDITION_MOUNT}
rmdir ${GUEST_ADDITION_MOUNT}

@@ -0,0 +1,17 @@

#!/bin/bash
set -e
IFNAME=$1
ADDRESS="$(ip -4 addr show $IFNAME | grep "inet" | head -1 | awk '{print $2}' | cut -d/ -f1)"
sed -e "s/^.*${HOSTNAME}.*/${ADDRESS} ${HOSTNAME} ${HOSTNAME}.local/" -i /etc/hosts

# remove ubuntu-bionic entry
sed -e '/^.*ubuntu-bionic.*/d' -i /etc/hosts

# Update /etc/hosts about other hosts
cat >> /etc/hosts <<EOF
192.168.5.11  master-1
192.168.5.12  master-2
192.168.5.21  worker-1
192.168.5.22  worker-2
192.168.5.30  lb
EOF