update to Kubernetes 1.4

parent 7fe980da3e
commit be84590c87
@@ -17,8 +17,8 @@ The target audience for this tutorial is someone planning to support a productio

## Cluster Details

-* Kubernetes 1.3.6
-* Docker 1.11.2
+* Kubernetes 1.4.0
+* Docker 1.12.1
* [CNI Based Networking](https://github.com/containernetworking/cni)
* Secure communication between all components (etcd, control plane, workers)
* Default Service Account and Secrets
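A quick way to confirm the bumped versions on a provisioned node; a minimal sketch, assuming `kubectl` and the Docker engine from the updated cluster are already installed and on the PATH where you run it:

```
# Print client/server versions and compare against the Cluster Details list above.
kubectl version
docker version --format 'Docker {{.Server.Version}}'
```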
@@ -1,6 +1,6 @@
# Cloud Infrastructure Provisioning - Amazon Web Services

-This lab will walk you through provisioning the compute instances required for running a H/A Kubernetes cluster. A total of 9 virtual machines will be created.
+This lab will walk you through provisioning the compute instances required for running an H/A Kubernetes cluster. A total of 6 virtual machines will be created.

The guide assumes you'll be creating resources in the `us-west-2` region.
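Since the lab assumes `us-west-2`, it can help to pin the AWS CLI to that region up front; a hedged sketch, assuming the `aws` CLI is installed and credentials are already configured:

```
# Make us-west-2 the default region for the aws commands that follow.
aws configure set default.region us-west-2
```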
@@ -280,65 +280,6 @@ ssh ubuntu@${WORKER_0_PUBLIC_IP_ADDRESS}

### Virtual Machines

-#### etcd
-
-```
-ETCD_0_INSTANCE_ID=$(aws ec2 run-instances \
-  --associate-public-ip-address \
-  --image-id ${IMAGE_ID} \
-  --count 1 \
-  --key-name kubernetes \
-  --security-group-ids ${SECURITY_GROUP_ID} \
-  --instance-type t2.small \
-  --private-ip-address 10.240.0.10 \
-  --subnet-id ${SUBNET_ID} | \
-  jq -r '.Instances[].InstanceId')
-```
-
-```
-aws ec2 create-tags \
-  --resources ${ETCD_0_INSTANCE_ID} \
-  --tags Key=Name,Value=etcd0
-```
-
-```
-ETCD_1_INSTANCE_ID=$(aws ec2 run-instances \
-  --associate-public-ip-address \
-  --image-id ${IMAGE_ID} \
-  --count 1 \
-  --key-name kubernetes \
-  --security-group-ids ${SECURITY_GROUP_ID} \
-  --instance-type t2.small \
-  --private-ip-address 10.240.0.11 \
-  --subnet-id ${SUBNET_ID} | \
-  jq -r '.Instances[].InstanceId')
-```
-
-```
-aws ec2 create-tags \
-  --resources ${ETCD_1_INSTANCE_ID} \
-  --tags Key=Name,Value=etcd1
-```
-
-```
-ETCD_2_INSTANCE_ID=$(aws ec2 run-instances \
-  --associate-public-ip-address \
-  --image-id ${IMAGE_ID} \
-  --count 1 \
-  --key-name kubernetes \
-  --security-group-ids ${SECURITY_GROUP_ID} \
-  --instance-type t2.small \
-  --private-ip-address 10.240.0.12 \
-  --subnet-id ${SUBNET_ID} | \
-  jq -r '.Instances[].InstanceId')
-```
-
-```
-aws ec2 create-tags \
-  --resources ${ETCD_2_INSTANCE_ID} \
-  --tags Key=Name,Value=etcd2
-```
-
#### Kubernetes Controllers

```
@@ -350,7 +291,7 @@ CONTROLLER_0_INSTANCE_ID=$(aws ec2 run-instances \
  --key-name kubernetes \
  --security-group-ids ${SECURITY_GROUP_ID} \
  --instance-type t2.small \
-  --private-ip-address 10.240.0.20 \
+  --private-ip-address 10.240.0.10 \
  --subnet-id ${SUBNET_ID} | \
  jq -r '.Instances[].InstanceId')
```

@@ -376,7 +317,7 @@ CONTROLLER_1_INSTANCE_ID=$(aws ec2 run-instances \
  --key-name kubernetes \
  --security-group-ids ${SECURITY_GROUP_ID} \
  --instance-type t2.small \
-  --private-ip-address 10.240.0.21 \
+  --private-ip-address 10.240.0.11 \
  --subnet-id ${SUBNET_ID} | \
  jq -r '.Instances[].InstanceId')
```

@@ -402,7 +343,7 @@ CONTROLLER_2_INSTANCE_ID=$(aws ec2 run-instances \
  --key-name kubernetes \
  --security-group-ids ${SECURITY_GROUP_ID} \
  --instance-type t2.small \
-  --private-ip-address 10.240.0.22 \
+  --private-ip-address 10.240.0.12 \
  --subnet-id ${SUBNET_ID} | \
  jq -r '.Instances[].InstanceId')
```

@@ -430,7 +371,7 @@ WORKER_0_INSTANCE_ID=$(aws ec2 run-instances \
  --key-name kubernetes \
  --security-group-ids ${SECURITY_GROUP_ID} \
  --instance-type t2.small \
-  --private-ip-address 10.240.0.30 \
+  --private-ip-address 10.240.0.20 \
  --subnet-id ${SUBNET_ID} | \
  jq -r '.Instances[].InstanceId')
```

@@ -456,7 +397,7 @@ WORKER_1_INSTANCE_ID=$(aws ec2 run-instances \
  --key-name kubernetes \
  --security-group-ids ${SECURITY_GROUP_ID} \
  --instance-type t2.small \
-  --private-ip-address 10.240.0.31 \
+  --private-ip-address 10.240.0.21 \
  --subnet-id ${SUBNET_ID} | \
  jq -r '.Instances[].InstanceId')
```

@@ -482,7 +423,7 @@ WORKER_2_INSTANCE_ID=$(aws ec2 run-instances \
  --key-name kubernetes \
  --security-group-ids ${SECURITY_GROUP_ID} \
  --instance-type t2.small \
-  --private-ip-address 10.240.0.32 \
+  --private-ip-address 10.240.0.22 \
  --subnet-id ${SUBNET_ID} | \
  jq -r '.Instances[].InstanceId')
```

@@ -508,13 +449,10 @@ aws ec2 describe-instances \
  jq -j '.Reservations[].Instances[] | .InstanceId, " ", .Placement.AvailabilityZone, " ", .PrivateIpAddress, " ", .PublicIpAddress, "\n"'
```
```
i-f3714f2e us-west-2c 10.240.0.22 XX.XXX.XX.XX
i-ae714f73 us-west-2c 10.240.0.11 XX.XX.XX.XXX
i-f4714f29 us-west-2c 10.240.0.21 XX.XX.XXX.XXX
i-f6714f2b us-west-2c 10.240.0.12 XX.XX.XX.XX
i-e26e503f us-west-2c 10.240.0.30 XX.XX.XXX.XXX
i-e36e503e us-west-2c 10.240.0.31 XX.XX.XX.XX
i-e26e503f us-west-2c 10.240.0.22 XX.XX.XXX.XXX
i-e8714f35 us-west-2c 10.240.0.10 XX.XX.XXX.XXX
i-78704ea5 us-west-2c 10.240.0.20 XX.XX.XXX.XXX
i-4a6e5097 us-west-2c 10.240.0.32 XX.XX.XX.XX
```
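Before SSHing to the new instances it can help to wait until they are actually running; a hedged sketch using the AWS CLI wait helper and the instance-ID variables captured by the `run-instances` commands above:

```
# Block until all six instances report the running state.
aws ec2 wait instance-running \
  --instance-ids \
    ${CONTROLLER_0_INSTANCE_ID} ${CONTROLLER_1_INSTANCE_ID} ${CONTROLLER_2_INSTANCE_ID} \
    ${WORKER_0_INSTANCE_ID} ${WORKER_1_INSTANCE_ID} ${WORKER_2_INSTANCE_ID}
```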
@@ -1,15 +1,6 @@
# Cloud Infrastructure Provisioning - Google Cloud Platform

-This lab will walk you through provisioning the compute instances required for running a H/A Kubernetes cluster. A total of 9 virtual machines will be created.
-
-If you are following this guide using the GCP free trial you may run into the following error:
-
-```
-ERROR: (gcloud.compute.instances.create) Some requests did not succeed:
-- Quota 'CPUS' exceeded. Limit: 8.0
-```
-
-This means you'll only be able to create 8 machines until you upgrade your account. In that case skip the provisioning of the `worker2` node to avoid hitting the CPUS quota.
+This lab will walk you through provisioning the compute instances required for running an H/A Kubernetes cluster. A total of 6 virtual machines will be created.

After completing this guide you should have the following compute instances:
@@ -19,15 +10,12 @@ gcloud compute instances list

````
NAME ZONE MACHINE_TYPE PREEMPTIBLE INTERNAL_IP EXTERNAL_IP STATUS
-controller0 us-central1-f n1-standard-1 10.240.0.20 XXX.XXX.XXX.XXX RUNNING
-controller1 us-central1-f n1-standard-1 10.240.0.21 XXX.XXX.XXX.XXX RUNNING
-controller2 us-central1-f n1-standard-1 10.240.0.22 XXX.XXX.XXX.XXX RUNNING
-etcd0 us-central1-f n1-standard-1 10.240.0.10 XXX.XXX.XXX.XXX RUNNING
-etcd1 us-central1-f n1-standard-1 10.240.0.11 XXX.XXX.XXX.XXX RUNNING
-etcd2 us-central1-f n1-standard-1 10.240.0.12 XXX.XXX.XXX.XXX RUNNING
-worker0 us-central1-f n1-standard-1 10.240.0.30 XXX.XXX.XXX.XXX RUNNING
-worker1 us-central1-f n1-standard-1 10.240.0.31 XXX.XXX.XXX.XXX RUNNING
-worker2 us-central1-f n1-standard-1 10.240.0.32 XXX.XXX.XXX.XXX RUNNING
+controller0 us-central1-f n1-standard-1 10.240.0.10 XXX.XXX.XXX.XXX RUNNING
+controller1 us-central1-f n1-standard-1 10.240.0.11 XXX.XXX.XXX.XXX RUNNING
+controller2 us-central1-f n1-standard-1 10.240.0.12 XXX.XXX.XXX.XXX RUNNING
+worker0 us-central1-f n1-standard-1 10.240.0.20 XXX.XXX.XXX.XXX RUNNING
+worker1 us-central1-f n1-standard-1 10.240.0.21 XXX.XXX.XXX.XXX RUNNING
+worker2 us-central1-f n1-standard-1 10.240.0.22 XXX.XXX.XXX.XXX RUNNING
````

> All machines will be provisioned with fixed private IP addresses to simplify the bootstrap process.
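The listing above assumes the `us-central1-f` zone used throughout the lab; one hedged way to avoid repeating `--region`/`--zone` on every `gcloud` call is to set project defaults first (the values here are the lab's, adjust as needed):

```
# Set default region/zone so later gcloud commands can omit --region/--zone.
gcloud config set compute/region us-central1
gcloud config set compute/zone us-central1-f
```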
@@ -50,8 +38,7 @@ Create a subnet for the Kubernetes cluster:
```
gcloud compute networks subnets create kubernetes \
  --network kubernetes \
-  --range 10.240.0.0/24 \
-  --region us-central1
+  --range 10.240.0.0/24
```

```
@@ -140,13 +127,13 @@ All the VMs in this lab will be provisioned using Ubuntu 16.04 mainly because it

### Virtual Machines

-#### etcd
+#### Kubernetes Controllers

```
-gcloud compute instances create etcd0 \
+gcloud compute instances create controller0 \
  --boot-disk-size 200GB \
  --can-ip-forward \
-  --image ubuntu-1604-xenial-v20160627 \
+  --image ubuntu-1604-xenial-v20160921 \
  --image-project ubuntu-os-cloud \
  --machine-type n1-standard-1 \
  --private-network-ip 10.240.0.10 \
@@ -154,59 +141,24 @@ gcloud compute instances create etcd0 \
```

```
-gcloud compute instances create etcd1 \
+gcloud compute instances create controller1 \
  --boot-disk-size 200GB \
  --can-ip-forward \
-  --image ubuntu-1604-xenial-v20160627 \
+  --image ubuntu-1604-xenial-v20160921 \
  --image-project ubuntu-os-cloud \
  --machine-type n1-standard-1 \
  --private-network-ip 10.240.0.11 \
  --subnet kubernetes
```

-```
-gcloud compute instances create etcd2 \
-  --boot-disk-size 200GB \
-  --can-ip-forward \
-  --image ubuntu-1604-xenial-v20160627 \
-  --image-project ubuntu-os-cloud \
-  --machine-type n1-standard-1 \
-  --private-network-ip 10.240.0.12 \
-  --subnet kubernetes
-```
-
-#### Kubernetes Controllers
-
-```
-gcloud compute instances create controller0 \
-  --boot-disk-size 200GB \
-  --can-ip-forward \
-  --image ubuntu-1604-xenial-v20160627 \
-  --image-project ubuntu-os-cloud \
-  --machine-type n1-standard-1 \
-  --private-network-ip 10.240.0.20 \
-  --subnet kubernetes
-```
-
-```
-gcloud compute instances create controller1 \
-  --boot-disk-size 200GB \
-  --can-ip-forward \
-  --image ubuntu-1604-xenial-v20160627 \
-  --image-project ubuntu-os-cloud \
-  --machine-type n1-standard-1 \
-  --private-network-ip 10.240.0.21 \
-  --subnet kubernetes
-```
-
```
gcloud compute instances create controller2 \
  --boot-disk-size 200GB \
  --can-ip-forward \
-  --image ubuntu-1604-xenial-v20160627 \
+  --image ubuntu-1604-xenial-v20160921 \
  --image-project ubuntu-os-cloud \
  --machine-type n1-standard-1 \
-  --private-network-ip 10.240.0.22 \
+  --private-network-ip 10.240.0.12 \
  --subnet kubernetes
```
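The three controller instances above differ only in name and the last octet of their private IP; a hedged sketch of the same three `gcloud` calls written as a loop (same image and flags as the lab uses):

```
# Create controller0-2 at 10.240.0.10-12 in one loop.
for i in 0 1 2; do
  gcloud compute instances create controller${i} \
    --boot-disk-size 200GB \
    --can-ip-forward \
    --image ubuntu-1604-xenial-v20160921 \
    --image-project ubuntu-os-cloud \
    --machine-type n1-standard-1 \
    --private-network-ip 10.240.0.1${i} \
    --subnet kubernetes
done
```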
@@ -216,10 +168,10 @@ gcloud compute instances create controller2 \
gcloud compute instances create worker0 \
  --boot-disk-size 200GB \
  --can-ip-forward \
-  --image ubuntu-1604-xenial-v20160627 \
+  --image ubuntu-1604-xenial-v20160921 \
  --image-project ubuntu-os-cloud \
  --machine-type n1-standard-1 \
-  --private-network-ip 10.240.0.30 \
+  --private-network-ip 10.240.0.20 \
  --subnet kubernetes
```
@@ -227,22 +179,20 @@ gcloud compute instances create worker0 \
gcloud compute instances create worker1 \
  --boot-disk-size 200GB \
  --can-ip-forward \
-  --image ubuntu-1604-xenial-v20160627 \
+  --image ubuntu-1604-xenial-v20160921 \
  --image-project ubuntu-os-cloud \
  --machine-type n1-standard-1 \
-  --private-network-ip 10.240.0.31 \
+  --private-network-ip 10.240.0.21 \
  --subnet kubernetes
```

+If you are using the GCP free trial, which limits your account to 8 nodes, skip the creation of `worker2` to avoid hitting the CPUS quota.

```
gcloud compute instances create worker2 \
  --boot-disk-size 200GB \
  --can-ip-forward \
-  --image ubuntu-1604-xenial-v20160627 \
+  --image ubuntu-1604-xenial-v20160921 \
  --image-project ubuntu-os-cloud \
  --machine-type n1-standard-1 \
-  --private-network-ip 10.240.0.32 \
+  --private-network-ip 10.240.0.22 \
  --subnet kubernetes
```
@@ -149,9 +149,9 @@ cat > kubernetes-csr.json <<EOF
    "worker0",
    "worker1",
    "worker2",
-    "ip-10-240-0-30",
-    "ip-10-240-0-31",
-    "ip-10-240-0-32",
+    "ip-10-240-0-20",
+    "ip-10-240-0-21",
+    "ip-10-240-0-22",
    "10.32.0.1",
    "10.240.0.10",
    "10.240.0.11",
@@ -159,9 +159,6 @@ cat > kubernetes-csr.json <<EOF
    "10.240.0.20",
    "10.240.0.21",
    "10.240.0.22",
-    "10.240.0.30",
-    "10.240.0.31",
-    "10.240.0.32",
    "${KUBERNETES_PUBLIC_ADDRESS}",
    "127.0.0.1"
  ],
@@ -212,7 +209,7 @@ openssl x509 -in kubernetes.pem -text -noout
Set the list of Kubernetes hosts where the certs should be copied to:

```
-KUBERNETES_HOSTS=(controller0 controller1 controller2 etcd0 etcd1 etcd2 worker0 worker1 worker2)
+KUBERNETES_HOSTS=(controller0 controller1 controller2 worker0 worker1 worker2)
```
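With `KUBERNETES_HOSTS` set, the copy step is the same loop for every node; a hedged, platform-neutral sketch assuming direct SSH access as the `ubuntu` user and the `.pem` files in the current directory (the GCE and AWS sections below use their platform-specific copy commands):

```
# Copy the CA and server certificates to every host in the list.
for host in ${KUBERNETES_HOSTS[*]}; do
  scp ca.pem kubernetes-key.pem kubernetes.pem ubuntu@${host}:~/
done
```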
### GCE
@@ -2,9 +2,9 @@

In this lab you will bootstrap a 3 node etcd cluster. The following virtual machines will be used:

-* etcd0
-* etcd1
-* etcd2
+* controller0
+* controller1
+* controller2

## Why
@@ -18,37 +18,47 @@ following reasons:

## Provision the etcd Cluster

-Run the following commands on `etcd0`, `etcd1`, `etcd2`:
+Run the following commands on `controller0`, `controller1`, `controller2`:

-Move the TLS certificates in place:
+### TLS Certificates
+
+The TLS certificates created in the [Setting up a CA and TLS Cert Generation](02-certificate-authority.md) lab will be used to secure communication between the Kubernetes API server and the etcd cluster. The TLS certificates will also be used to limit access to the etcd cluster using TLS client authentication. Only clients with a TLS certificate signed by a trusted CA will be able to access the etcd cluster.
+
+Copy the TLS certificates to the etcd configuration directory:

```
sudo mkdir -p /etc/etcd/
```

```
-sudo mv ca.pem kubernetes-key.pem kubernetes.pem /etc/etcd/
+sudo cp ca.pem kubernetes-key.pem kubernetes.pem /etc/etcd/
```

-Download and install the etcd binaries:
+### Download and Install the etcd binaries
+
+Download the official etcd release binaries from the `coreos/etcd` GitHub project:

```
-wget https://github.com/coreos/etcd/releases/download/v3.0.8/etcd-v3.0.8-linux-amd64.tar.gz
+wget https://github.com/coreos/etcd/releases/download/v3.0.10/etcd-v3.0.10-linux-amd64.tar.gz
```

+Extract and install the `etcd` server binary and the `etcdctl` command line client:
+
+```
+tar -xvf etcd-v3.0.10-linux-amd64.tar.gz
+```
+
```
-tar -xvf etcd-v3.0.8-linux-amd64.tar.gz
+sudo mv etcd-v3.0.10-linux-amd64/etcd* /usr/bin/
```

-```
-sudo cp etcd-v3.0.8-linux-amd64/etcd* /usr/bin/
-```
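A hedged sanity check that the new release is the one now on the path before continuing:

```
# Both should report version 3.0.10.
etcd --version
etcdctl --version
```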
All etcd data is stored under the etcd data directory. In a production cluster the data directory should be backed by a persistent disk. Create the etcd data directory:

```
sudo mkdir -p /var/lib/etcd
```

-Create the etcd systemd unit file:
+The etcd server will be started and managed by systemd. Create the etcd systemd unit file:

```
cat > etcd.service <<"EOF"
@@ -57,6 +67,7 @@ Description=etcd
Documentation=https://github.com/coreos

[Service]
+Type=notify
ExecStart=/usr/bin/etcd --name ETCD_NAME \
  --cert-file=/etc/etcd/kubernetes.pem \
  --key-file=/etc/etcd/kubernetes-key.pem \
@@ -69,7 +80,7 @@ ExecStart=/usr/bin/etcd --name ETCD_NAME \
  --listen-client-urls https://INTERNAL_IP:2379,http://127.0.0.1:2379 \
  --advertise-client-urls https://INTERNAL_IP:2379 \
  --initial-cluster-token etcd-cluster-0 \
-  --initial-cluster etcd0=https://10.240.0.10:2380,etcd1=https://10.240.0.11:2380,etcd2=https://10.240.0.12:2380 \
+  --initial-cluster controller0=https://10.240.0.10:2380,controller1=https://10.240.0.11:2380,controller2=https://10.240.0.12:2380 \
  --initial-cluster-state new \
  --data-dir=/var/lib/etcd
Restart=on-failure
@@ -82,6 +93,8 @@ EOF

### Set The Internal IP Address

+The internal IP address will be used by etcd to serve client requests and communicate with other etcd peers.
+
#### GCE

```
@@ -97,12 +110,14 @@ INTERNAL_IP=$(curl -s http://169.254.169.254/latest/meta-data/local-ipv4)

---

-Set the etcd name:
+Each etcd member must have a unique name within an etcd cluster. Set the etcd name:

```
-ETCD_NAME=etcd$(echo $INTERNAL_IP | cut -c 11)
+ETCD_NAME=controller$(echo $INTERNAL_IP | cut -c 11)
```

Substitute the etcd name and internal IP address:

```
sed -i s/INTERNAL_IP/${INTERNAL_IP}/g etcd.service
```
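The `cut -c 11` used when setting `ETCD_NAME` above works because the three controller IPs (10.240.0.10-12) differ only in their 11th character; a quick sanity check:

```
# Character 11 of "10.240.0.12" is "2", so ETCD_NAME becomes controller2.
echo 10.240.0.12 | cut -c 11
```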
@@ -111,15 +126,21 @@ sed -i s/INTERNAL_IP/${INTERNAL_IP}/g etcd.service
sed -i s/ETCD_NAME/${ETCD_NAME}/g etcd.service
```

Once the etcd systemd unit file is ready, move it to the systemd system directory:

```
sudo mv etcd.service /etc/systemd/system/
```

-Start etcd:
+Start the etcd server:

```
sudo systemctl daemon-reload
```
```
sudo systemctl enable etcd
```
```
sudo systemctl start etcd
```
@@ -130,13 +151,13 @@ sudo systemctl start etcd
sudo systemctl status etcd --no-pager
```

-> Remember to run these steps on `etcd0`, `etcd1`, and `etcd2`
+> Remember to run these steps on `controller0`, `controller1`, and `controller2`

## Verification

Once all 3 etcd nodes have been bootstrapped verify the etcd cluster is healthy:

-* SSH to etcd0 and run the following commands:
+* On one of the controller nodes run the following command:

```
etcdctl --ca-file=/etc/etcd/ca.pem cluster-health
@@ -27,24 +27,38 @@ Each component is being run on the same machines for the following reasons:

Run the following commands on `controller0`, `controller1`, `controller2`:

-Move the TLS certificates in place:
+### TLS Certificates
+
+The TLS certificates created in the [Setting up a CA and TLS Cert Generation](02-certificate-authority.md) lab will be used to secure communication between the Kubernetes API server and Kubernetes clients such as `kubectl` and the `kubelet` agent. The TLS certificates will also be used to authenticate the Kubernetes API server to etcd via TLS client auth.
+
+Copy the TLS certificates to the Kubernetes configuration directory:

```
sudo mkdir -p /var/lib/kubernetes
```

```
-sudo mv ca.pem kubernetes-key.pem kubernetes.pem /var/lib/kubernetes/
+sudo cp ca.pem kubernetes-key.pem kubernetes.pem /var/lib/kubernetes/
```

-Download and install the Kubernetes controller binaries:
+### Download and install the Kubernetes controller binaries
+
+Download the official Kubernetes release binaries:

```
-wget https://storage.googleapis.com/kubernetes-release/release/v1.3.6/bin/linux/amd64/kube-apiserver
-wget https://storage.googleapis.com/kubernetes-release/release/v1.3.6/bin/linux/amd64/kube-controller-manager
-wget https://storage.googleapis.com/kubernetes-release/release/v1.3.6/bin/linux/amd64/kube-scheduler
-wget https://storage.googleapis.com/kubernetes-release/release/v1.3.6/bin/linux/amd64/kubectl
+wget https://storage.googleapis.com/kubernetes-release/release/v1.4.0/bin/linux/amd64/kube-apiserver
```
+```
+wget https://storage.googleapis.com/kubernetes-release/release/v1.4.0/bin/linux/amd64/kube-controller-manager
+```
+```
+wget https://storage.googleapis.com/kubernetes-release/release/v1.4.0/bin/linux/amd64/kube-scheduler
+```
+```
+wget https://storage.googleapis.com/kubernetes-release/release/v1.4.0/bin/linux/amd64/kubectl
+```

Install the Kubernetes binaries:

```
chmod +x kube-apiserver kube-controller-manager kube-scheduler kubectl
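# A hedged sanity check: once made executable, each downloaded binary can
# report its version, which should match the v1.4.0 release pulled above.
./kube-apiserver --version
./kubectl version --client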
@@ -300,8 +314,7 @@ gcloud compute http-health-checks create kube-apiserver-check \

```
gcloud compute target-pools create kubernetes-pool \
-  --health-check kube-apiserver-check \
-  --region us-central1
+  --health-check kube-apiserver-check
```

```
@@ -27,19 +27,19 @@ sudo mkdir -p /var/lib/kubernetes
```

```
-sudo mv ca.pem kubernetes-key.pem kubernetes.pem /var/lib/kubernetes/
+sudo cp ca.pem kubernetes-key.pem kubernetes.pem /var/lib/kubernetes/
```

#### Docker

-Kubernetes should be compatible with the Docker 1.9.x - 1.11.x:
+Kubernetes should be compatible with Docker 1.9.x - 1.12.x:

```
-wget https://get.docker.com/builds/Linux/x86_64/docker-1.11.2.tgz
+wget https://get.docker.com/builds/Linux/x86_64/docker-1.12.1.tgz
```

```
-tar -xvf docker-1.11.2.tgz
+tar -xvf docker-1.12.1.tgz
```
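Once the extracted binaries are copied into place, a hedged check that the expected engine version was installed:

```
# Should print Docker version 1.12.1 plus a build hash.
docker -v
```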
```
@@ -90,20 +90,24 @@ sudo mkdir -p /opt/cni
```

```
-wget https://storage.googleapis.com/kubernetes-release/network-plugins/cni-c864f0e1ea73719b8f4582402b0847064f9883b0.tar.gz
+wget https://storage.googleapis.com/kubernetes-release/network-plugins/cni-07a8a28637e97b22eb8dfe710eeae1344f69d16e.tar.gz
```

```
-sudo tar -xvf cni-c864f0e1ea73719b8f4582402b0847064f9883b0.tar.gz -C /opt/cni
+sudo tar -xvf cni-07a8a28637e97b22eb8dfe710eeae1344f69d16e.tar.gz -C /opt/cni
```

Download and install the Kubernetes worker binaries:

```
-wget https://storage.googleapis.com/kubernetes-release/release/v1.3.6/bin/linux/amd64/kubectl
-wget https://storage.googleapis.com/kubernetes-release/release/v1.3.6/bin/linux/amd64/kube-proxy
-wget https://storage.googleapis.com/kubernetes-release/release/v1.3.6/bin/linux/amd64/kubelet
+wget https://storage.googleapis.com/kubernetes-release/release/v1.4.0/bin/linux/amd64/kubectl
```
+```
+wget https://storage.googleapis.com/kubernetes-release/release/v1.4.0/bin/linux/amd64/kube-proxy
+```
+```
+wget https://storage.googleapis.com/kubernetes-release/release/v1.4.0/bin/linux/amd64/kubelet
+```

```
@@ -124,7 +128,7 @@ kind: Config
clusters:
- cluster:
    certificate-authority: /var/lib/kubernetes/ca.pem
-    server: https://10.240.0.20:6443
+    server: https://10.240.0.10:6443
  name: kubernetes
contexts:
- context:
@@ -150,7 +154,7 @@ Requires=docker.service
[Service]
ExecStart=/usr/bin/kubelet \
  --allow-privileged=true \
-  --api-servers=https://10.240.0.20:6443,https://10.240.0.21:6443,https://10.240.0.22:6443 \
+  --api-servers=https://10.240.0.10:6443,https://10.240.0.11:6443,https://10.240.0.12:6443 \
  --cloud-provider= \
  --cluster-dns=10.32.0.10 \
  --cluster-domain=cluster.local \
@@ -193,7 +197,7 @@ Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
ExecStart=/usr/bin/kube-proxy \
-  --master=https://10.240.0.20:6443 \
+  --master=https://10.240.0.10:6443 \
  --kubeconfig=/var/lib/kubelet/kubeconfig \
  --proxy-mode=iptables \
  --v=2
@@ -5,7 +5,7 @@
### OS X

```
-wget https://storage.googleapis.com/kubernetes-release/release/v1.3.6/bin/darwin/amd64/kubectl
+wget https://storage.googleapis.com/kubernetes-release/release/v1.4.0/bin/darwin/amd64/kubectl
chmod +x kubectl
sudo mv kubectl /usr/local/bin
```

@@ -13,7 +13,7 @@ sudo mv kubectl /usr/local/bin
### Linux

```
-wget https://storage.googleapis.com/kubernetes-release/release/v1.3.6/bin/linux/amd64/kubectl
+wget https://storage.googleapis.com/kubernetes-release/release/v1.4.0/bin/linux/amd64/kubectl
chmod +x kubectl
sudo mv kubectl /usr/local/bin
```
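After installing, a hedged way to confirm the client matches the release targeted by this update:

```
# Should report GitVersion v1.4.0 for the client.
kubectl version --client
```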
@@ -42,9 +42,9 @@ kubectl get nodes \
Output:

```
-10.240.0.30 10.200.0.0/24
-10.240.0.31 10.200.1.0/24
-10.240.0.32 10.200.2.0/24
+10.240.0.20 10.200.0.0/24
+10.240.0.21 10.200.1.0/24
+10.240.0.22 10.200.2.0/24
```

## Create Routes

@@ -54,21 +54,21 @@ Output:
```
gcloud compute routes create kubernetes-route-10-200-0-0-24 \
  --network kubernetes \
-  --next-hop-address 10.240.0.30 \
+  --next-hop-address 10.240.0.20 \
  --destination-range 10.200.0.0/24
```

```
gcloud compute routes create kubernetes-route-10-200-1-0-24 \
  --network kubernetes \
-  --next-hop-address 10.240.0.31 \
+  --next-hop-address 10.240.0.21 \
  --destination-range 10.200.1.0/24
```

```
gcloud compute routes create kubernetes-route-10-200-2-0-24 \
  --network kubernetes \
-  --next-hop-address 10.240.0.32 \
+  --next-hop-address 10.240.0.22 \
  --destination-range 10.200.2.0/24
```
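A hedged way to confirm the three pod routes were created on the `kubernetes` network:

```
# The 10.200.x.0/24 routes should list the worker IPs as next hops.
gcloud compute routes list | grep kubernetes-route
```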