update to Kubernetes 1.6

parent b7ae204c86
commit 3b1700d85e

@@ -17,9 +17,9 @@ The target audience for this tutorial is someone planning to support a productio

 ## Cluster Details

-* Kubernetes 1.5.1
+* Kubernetes 1.6.0
 * Docker 1.12.1
-* etcd 3.0.10
+* etcd 3.1.4
 * [CNI Based Networking](https://github.com/containernetworking/cni)
 * Secure communication between all components (etcd, control plane, workers)
 * Default Service Account and Secrets
@@ -33,6 +33,7 @@ gcloud config set compute/region us-central1
 ```
 gcloud config set compute/zone us-central1-f
 ```

 Create a Kubernetes network:

 ```
@@ -104,13 +105,14 @@ gcloud compute firewall-rules list --filter "network=kubernetes"
 ```

 ```
-NAME                               NETWORK     SRC_RANGES      RULES                         SRC_TAGS  TARGET_TAGS
-kubernetes-allow-api-server        kubernetes  0.0.0.0/0       tcp:6443
-kubernetes-allow-healthz           kubernetes  130.211.0.0/22  tcp:8080
-kubernetes-allow-icmp              kubernetes  0.0.0.0/0       icmp
-kubernetes-allow-internal          kubernetes  10.240.0.0/24   tcp:0-65535,udp:0-65535,icmp
-kubernetes-allow-rdp               kubernetes  0.0.0.0/0       tcp:3389
-kubernetes-allow-ssh               kubernetes  0.0.0.0/0       tcp:22
+NAME                               NETWORK     SRC_RANGES      RULES                         SRC_TAGS  TARGET_TAGS
+kubernetes-allow-api-server        kubernetes  0.0.0.0/0       tcp:6443
+kubernetes-allow-healthz           kubernetes  130.211.0.0/22  tcp:8080
+kubernetes-allow-icmp              kubernetes  0.0.0.0/0       icmp
+kubernetes-allow-internal          kubernetes  10.240.0.0/24   tcp:0-65535,udp:0-65535,icmp
+kubernetes-allow-internal-podcidr  kubernetes  10.200.0.0/16   tcp:0-65535,udp:0-65535,icmp
+kubernetes-allow-rdp               kubernetes  0.0.0.0/0       tcp:3389
+kubernetes-allow-ssh               kubernetes  0.0.0.0/0       tcp:22
 ```
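
The new `kubernetes-allow-internal-podcidr` rule opens the pod CIDR range (`10.200.0.0/16`) between hosts. The command that created it is not part of this diff; a sketch of what it would look like, with the name and ranges taken from the listing above:

```
gcloud compute firewall-rules create kubernetes-allow-internal-podcidr \
  --allow tcp,udp,icmp \
  --network kubernetes \
  --source-ranges 10.200.0.0/16
```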

 ### Kubernetes Public Address

@@ -139,6 +139,49 @@ KUBERNETES_PUBLIC_ADDRESS=$(aws elb describe-load-balancers \

 ---

+Create the `admin-csr.json` file:
+
+```
+cat > admin-csr.json <<EOF
+{
+  "CN": "admin",
+  "hosts": [],
+  "key": {
+    "algo": "rsa",
+    "size": 2048
+  },
+  "names": [
+    {
+      "C": "US",
+      "L": "Portland",
+      "O": "system:masters",
+      "OU": "Cluster",
+      "ST": "Oregon"
+    }
+  ]
+}
+EOF
+```
+
+Generate the admin certificate and private key:
+
+```
+cfssl gencert \
+  -ca=ca.pem \
+  -ca-key=ca-key.pem \
+  -config=ca-config.json \
+  -profile=kubernetes \
+  admin-csr.json | cfssljson -bare admin
+```
+
+Results:
+
+```
+admin-key.pem
+admin.csr
+admin.pem
+```
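
The `O` field set to `system:masters` is what will give this admin certificate full cluster access once the API server is switched to RBAC later in this change. A quick way to confirm the generated certificate carries that organization, assuming `openssl` is installed:

```
openssl x509 -in admin.pem -noout -subject
```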

 Create the `kubernetes-csr.json` file:

 ```
@@ -146,19 +189,13 @@ cat > kubernetes-csr.json <<EOF
 {
   "CN": "kubernetes",
   "hosts": [
-    "worker0",
-    "worker1",
-    "worker2",
-    "ip-10-240-0-20",
-    "ip-10-240-0-21",
-    "ip-10-240-0-22",
     "10.32.0.1",
     "10.240.0.10",
     "10.240.0.11",
     "10.240.0.12",
-    "10.240.0.20",
-    "10.240.0.21",
-    "10.240.0.22",
+    "ip-10-240-0-20",
+    "ip-10-240-0-21",
+    "ip-10-240-0-22",
     "${KUBERNETES_PUBLIC_ADDRESS}",
     "127.0.0.1",
     "kubernetes.default"
@@ -213,6 +250,10 @@ Set the list of Kubernetes hosts where the certs should be copied to:
 KUBERNETES_HOSTS=(controller0 controller1 controller2 worker0 worker1 worker2)
 ```

+```
+KUBERNETES_CONTROLLERS=(controller0 controller1 controller2)
+```
+
 ### GCE

 The following command will:
@@ -221,7 +262,13 @@ The following command will:

 ```
 for host in ${KUBERNETES_HOSTS[*]}; do
-  gcloud compute copy-files ca.pem kubernetes-key.pem kubernetes.pem ${host}:~/
+  gcloud compute copy-files ca.pem ${host}:~/
 done
 ```

+```
+for host in ${KUBERNETES_CONTROLLERS[*]}; do
+  gcloud compute copy-files ca.pem ca-key.pem kubernetes-key.pem kubernetes.pem ${host}:~/
+done
+```
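
To spot-check that the right files landed on each class of host, one option is a directory listing over SSH (a sketch; assumes working `gcloud compute ssh` access to the instances):

```
gcloud compute ssh controller0 --command "ls ca.pem ca-key.pem kubernetes.pem kubernetes-key.pem"
gcloud compute ssh worker0 --command "ls ca.pem"
```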
@@ -236,7 +283,17 @@ for host in ${KUBERNETES_HOSTS[*]}; do
   PUBLIC_IP_ADDRESS=$(aws ec2 describe-instances \
     --filters "Name=tag:Name,Values=${host}" | \
     jq -r '.Reservations[].Instances[].PublicIpAddress')
-  scp -o "StrictHostKeyChecking no" ca.pem kubernetes-key.pem kubernetes.pem \
+  scp -o "StrictHostKeyChecking no" ca.pem \
     ubuntu@${PUBLIC_IP_ADDRESS}:~/
 done
 ```

+```
+for host in ${KUBERNETES_HOSTS[*]}; do
+  PUBLIC_IP_ADDRESS=$(aws ec2 describe-instances \
+    --filters "Name=tag:Name,Values=${host}" | \
+    jq -r '.Reservations[].Instances[].PublicIpAddress')
+  scp -o "StrictHostKeyChecking no" ca.pem ca-key.pem kubernetes-key.pem kubernetes.pem \
+    ubuntu@${PUBLIC_IP_ADDRESS}:~/
+done
+```
@@ -40,17 +40,17 @@ sudo cp ca.pem kubernetes-key.pem kubernetes.pem /etc/etcd/
 Download the official etcd release binaries from the `coreos/etcd` GitHub project:

 ```
-wget https://github.com/coreos/etcd/releases/download/v3.0.15/etcd-v3.0.15-linux-amd64.tar.gz
+wget https://github.com/coreos/etcd/releases/download/v3.1.4/etcd-v3.1.4-linux-amd64.tar.gz
 ```

 Extract and install the `etcd` server binary and the `etcdctl` command line client:

 ```
-tar -xvf etcd-v3.0.15-linux-amd64.tar.gz
+tar -xvf etcd-v3.1.4-linux-amd64.tar.gz
 ```

 ```
-sudo mv etcd-v3.0.15-linux-amd64/etcd* /usr/bin/
+sudo mv etcd-v3.1.4-linux-amd64/etcd* /usr/bin/
 ```

 All etcd data is stored under the etcd data directory. In a production cluster the data directory should be backed by a persistent disk. Create the etcd data directory:
@@ -59,38 +59,6 @@ All etcd data is stored under the etcd data directory. In a production cluster t
 sudo mkdir -p /var/lib/etcd
 ```

-The etcd server will be started and managed by systemd. Create the etcd systemd unit file:
-
-```
-cat > etcd.service <<"EOF"
-[Unit]
-Description=etcd
-Documentation=https://github.com/coreos
-
-[Service]
-ExecStart=/usr/bin/etcd --name ETCD_NAME \
-  --cert-file=/etc/etcd/kubernetes.pem \
-  --key-file=/etc/etcd/kubernetes-key.pem \
-  --peer-cert-file=/etc/etcd/kubernetes.pem \
-  --peer-key-file=/etc/etcd/kubernetes-key.pem \
-  --trusted-ca-file=/etc/etcd/ca.pem \
-  --peer-trusted-ca-file=/etc/etcd/ca.pem \
-  --initial-advertise-peer-urls https://INTERNAL_IP:2380 \
-  --listen-peer-urls https://INTERNAL_IP:2380 \
-  --listen-client-urls https://INTERNAL_IP:2379,http://127.0.0.1:2379 \
-  --advertise-client-urls https://INTERNAL_IP:2379 \
-  --initial-cluster-token etcd-cluster-0 \
-  --initial-cluster controller0=https://10.240.0.10:2380,controller1=https://10.240.0.11:2380,controller2=https://10.240.0.12:2380 \
-  --initial-cluster-state new \
-  --data-dir=/var/lib/etcd
-Restart=on-failure
-RestartSec=5
-
-[Install]
-WantedBy=multi-user.target
-EOF
-```
-
 ### Set The Internal IP Address

 The internal IP address will be used by etcd to serve client requests and communicate with other etcd peers.
@@ -116,14 +84,37 @@ Each etcd member must have a unique name within an etcd cluster. Set the etcd na
 ETCD_NAME=controller$(echo $INTERNAL_IP | cut -c 11)
 ```

-Substitute the etcd name and internal IP address:
-
-```
-sed -i s/INTERNAL_IP/${INTERNAL_IP}/g etcd.service
-```
-
-```
-sed -i s/ETCD_NAME/${ETCD_NAME}/g etcd.service
-```
+The etcd server will be started and managed by systemd. Create the etcd systemd unit file:
+
+```
+cat > etcd.service <<EOF
+[Unit]
+Description=etcd
+Documentation=https://github.com/coreos
+
+[Service]
+ExecStart=/usr/bin/etcd \\
+  --name ${ETCD_NAME} \\
+  --cert-file=/etc/etcd/kubernetes.pem \\
+  --key-file=/etc/etcd/kubernetes-key.pem \\
+  --peer-cert-file=/etc/etcd/kubernetes.pem \\
+  --peer-key-file=/etc/etcd/kubernetes-key.pem \\
+  --trusted-ca-file=/etc/etcd/ca.pem \\
+  --peer-trusted-ca-file=/etc/etcd/ca.pem \\
+  --initial-advertise-peer-urls https://${INTERNAL_IP}:2380 \\
+  --listen-peer-urls https://${INTERNAL_IP}:2380 \\
+  --listen-client-urls https://${INTERNAL_IP}:2379,http://127.0.0.1:2379 \\
+  --advertise-client-urls https://${INTERNAL_IP}:2379 \\
+  --initial-cluster-token etcd-cluster-0 \\
+  --initial-cluster controller0=https://10.240.0.10:2380,controller1=https://10.240.0.11:2380,controller2=https://10.240.0.12:2380 \\
+  --initial-cluster-state new \\
+  --data-dir=/var/lib/etcd
+Restart=on-failure
+RestartSec=5
+
+[Install]
+WantedBy=multi-user.target
+EOF
+```

 Once the etcd systemd unit file is ready, move it to the systemd system directory:
@@ -160,7 +151,11 @@ Once all 3 etcd nodes have been bootstrapped verify the etcd cluster is healthy:
 * On one of the controller nodes run the following command:

 ```
-etcdctl --ca-file=/etc/etcd/ca.pem cluster-health
+etcdctl \
+  --ca-file=/etc/etcd/ca.pem \
+  --cert-file=/etc/etcd/kubernetes.pem \
+  --key-file=/etc/etcd/kubernetes-key.pem \
+  cluster-health
 ```
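
If `cluster-health` reports a problem, `member list` with the same TLS flags is a useful follow-up to see which peers actually joined the cluster (assuming the certificate layout above):

```
etcdctl \
  --ca-file=/etc/etcd/ca.pem \
  --cert-file=/etc/etcd/kubernetes.pem \
  --key-file=/etc/etcd/kubernetes-key.pem \
  member list
```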

 ```
@@ -23,10 +23,88 @@ Each component is being run on the same machines for the following reasons:
 * Running multiple copies of each component is required for H/A
 * Running each component next to the API Server eases configuration.

+## Setup Authentication and Authorization
+
+### Authentication
+
+[Token based authentication](http://kubernetes.io/docs/admin/authentication) will be used to bootstrap the Kubernetes cluster. The authentication token is used by the following components:
+
+* kubelet (client)
+* Kubernetes API Server (server)
+
+The other components, mainly the `scheduler` and `controller manager`, access the Kubernetes API server locally over the insecure API port which does not require authentication. The insecure port is only enabled for local access.
+
+Generate a token:
+
+```
+BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
+```
+
+Generate a token file:
+
+```
+cat > token.csv <<EOF
+${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
+EOF
+```
+
+Copy the `token.csv` file to each controller node:
+
+```
+KUBERNETES_CONTROLLERS=(controller0 controller1 controller2)
+```
+
+```
+for host in ${KUBERNETES_CONTROLLERS[*]}; do
+  gcloud compute copy-files token.csv ${host}:~/
+done
+```
+
+Generate a bootstrap kubeconfig file:
+
+```
+KUBERNETES_PUBLIC_ADDRESS=$(gcloud compute addresses describe kubernetes \
+  --format 'value(address)')
+```
+
+```
+cat > bootstrap.kubeconfig <<EOF
+apiVersion: v1
+kind: Config
+clusters:
+- name: kubernetes
+  cluster:
+    certificate-authority: /var/lib/kubernetes/ca.pem
+    server: https://${KUBERNETES_PUBLIC_ADDRESS}:6443
+contexts:
+- name: kubelet-bootstrap
+  context:
+    cluster: kubernetes
+    user: kubelet-bootstrap
+current-context: kubelet-bootstrap
+users:
+- name: kubelet-bootstrap
+  user:
+    token: ${BOOTSTRAP_TOKEN}
+EOF
+```
+
+Copy the bootstrap kubeconfig file to each worker node:
+
+```
+KUBERNETES_WORKER_NODES=(worker0 worker1 worker2)
+```
+
+```
+for host in ${KUBERNETES_WORKER_NODES[*]}; do
+  gcloud compute copy-files bootstrap.kubeconfig ${host}:~/
+done
+```
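
The generated file can be sanity-checked before (or after) copying it around; `kubectl config view` parses it the same way the kubelet will:

```
kubectl config view --kubeconfig=bootstrap.kubeconfig
```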

 ## Provision the Kubernetes Controller Cluster

 Run the following commands on `controller0`, `controller1`, `controller2`:

+Copy the bootstrap token into place:
+
+```
+sudo mv token.csv /var/lib/kubernetes/
+```
+
 ### TLS Certificates

 The TLS certificates created in the [Setting up a CA and TLS Cert Generation](02-certificate-authority.md) lab will be used to secure communication between the Kubernetes API server and Kubernetes clients such as `kubectl` and the `kubelet` agent. The TLS certificates will also be used to authenticate the Kubernetes API server to etcd via TLS client auth.
@@ -38,7 +116,7 @@ sudo mkdir -p /var/lib/kubernetes
 ```

 ```
-sudo cp ca.pem kubernetes-key.pem kubernetes.pem /var/lib/kubernetes/
+sudo mv ca.pem ca-key.pem kubernetes-key.pem kubernetes.pem /var/lib/kubernetes/
 ```

 ### Download and install the Kubernetes controller binaries
@@ -46,16 +124,16 @@ sudo cp ca.pem kubernetes-key.pem kubernetes.pem /var/lib/kubernetes/
 Download the official Kubernetes release binaries:

 ```
-wget https://storage.googleapis.com/kubernetes-release/release/v1.5.1/bin/linux/amd64/kube-apiserver
+wget https://storage.googleapis.com/kubernetes-release/release/v1.6.0-beta.4/bin/linux/amd64/kube-apiserver
 ```
 ```
-wget https://storage.googleapis.com/kubernetes-release/release/v1.5.1/bin/linux/amd64/kube-controller-manager
+wget https://storage.googleapis.com/kubernetes-release/release/v1.6.0-beta.4/bin/linux/amd64/kube-controller-manager
 ```
 ```
-wget https://storage.googleapis.com/kubernetes-release/release/v1.5.1/bin/linux/amd64/kube-scheduler
+wget https://storage.googleapis.com/kubernetes-release/release/v1.6.0-beta.4/bin/linux/amd64/kube-scheduler
 ```
 ```
-wget https://storage.googleapis.com/kubernetes-release/release/v1.5.1/bin/linux/amd64/kubectl
+wget https://storage.googleapis.com/kubernetes-release/release/v1.6.0-beta.4/bin/linux/amd64/kubectl
 ```
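
Before installing, a quick version check confirms the expected release was downloaded (the same `--version` flag works for all four binaries):

```
chmod +x kube-apiserver
./kube-apiserver --version
```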

 Install the Kubernetes binaries:
@@ -70,56 +148,6 @@ sudo mv kube-apiserver kube-controller-manager kube-scheduler kubectl /usr/bin/

 ### Kubernetes API Server

-#### Setup Authentication and Authorization
-
-##### Authentication
-
-[Token based authentication](http://kubernetes.io/docs/admin/authentication) will be used to limit access to the Kubernetes API. The authentication token is used by the following components:
-
-* kubelet (client)
-* Kubernetes API Server (server)
-
-The other components, mainly the `scheduler` and `controller manager`, access the Kubernetes API server locally over the insecure API port which does not require authentication. The insecure port is only enabled for local access.
-
-Download the example token file:
-
-```
-wget https://raw.githubusercontent.com/kelseyhightower/kubernetes-the-hard-way/master/token.csv
-```
-
-Review the example token file and replace the default token.
-
-```
-cat token.csv
-```
-
-Move the token file into the Kubernetes configuration directory so it can be read by the Kubernetes API server.
-
-```
-sudo mv token.csv /var/lib/kubernetes/
-```
-
-##### Authorization
-
-Attribute-Based Access Control (ABAC) will be used to authorize access to the Kubernetes API. In this lab ABAC will be setup using the Kubernetes policy file backend as documented in the [Kubernetes authorization guide](http://kubernetes.io/docs/admin/authorization).
-
-Download the example authorization policy file:
-
-```
-wget https://raw.githubusercontent.com/kelseyhightower/kubernetes-the-hard-way/master/authorization-policy.jsonl
-```
-
-Review the example authorization policy file. No changes are required.
-
-```
-cat authorization-policy.jsonl
-```
-
-Move the authorization policy file into the Kubernetes configuration directory so it can be read by the Kubernetes API server.
-
-```
-sudo mv authorization-policy.jsonl /var/lib/kubernetes/
-```
-
 ### Create the systemd unit file
@@ -132,42 +160,62 @@ INTERNAL_IP=$(curl -s -H "Metadata-Flavor: Google" \
   http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/0/ip)
 ```

+```
+CLOUD_PROVIDER=gcp
+```

 #### AWS

 ```
 INTERNAL_IP=$(curl -s http://169.254.169.254/latest/meta-data/local-ipv4)
 ```
+```
+CLOUD_PROVIDER=aws
+```

 ---

 Create the systemd unit file:

 ```
-cat > kube-apiserver.service <<"EOF"
+cat > kube-apiserver.service <<EOF
 [Unit]
 Description=Kubernetes API Server
 Documentation=https://github.com/GoogleCloudPlatform/kubernetes

 [Service]
-ExecStart=/usr/bin/kube-apiserver \
-  --admission-control=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota \
-  --advertise-address=INTERNAL_IP \
-  --allow-privileged=true \
-  --apiserver-count=3 \
-  --authorization-mode=ABAC \
-  --authorization-policy-file=/var/lib/kubernetes/authorization-policy.jsonl \
-  --bind-address=0.0.0.0 \
-  --enable-swagger-ui=true \
-  --etcd-cafile=/var/lib/kubernetes/ca.pem \
-  --insecure-bind-address=0.0.0.0 \
-  --kubelet-certificate-authority=/var/lib/kubernetes/ca.pem \
-  --etcd-servers=https://10.240.0.10:2379,https://10.240.0.11:2379,https://10.240.0.12:2379 \
-  --service-account-key-file=/var/lib/kubernetes/kubernetes-key.pem \
-  --service-cluster-ip-range=10.32.0.0/24 \
-  --service-node-port-range=30000-32767 \
-  --tls-cert-file=/var/lib/kubernetes/kubernetes.pem \
-  --tls-private-key-file=/var/lib/kubernetes/kubernetes-key.pem \
-  --token-auth-file=/var/lib/kubernetes/token.csv \
+ExecStart=/usr/bin/kube-apiserver \\
+  --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota \\
+  --advertise-address=${INTERNAL_IP} \\
+  --allow-privileged=true \\
+  --apiserver-count=3 \\
+  --audit-log-maxage=30 \\
+  --audit-log-maxbackup=3 \\
+  --audit-log-maxsize=100 \\
+  --audit-log-path="/var/lib/audit.log" \\
+  --authorization-mode=RBAC \\
+  --bind-address=0.0.0.0 \\
+  --client-ca-file=/var/lib/kubernetes/ca.pem \\
+  --cloud-provider=${CLOUD_PROVIDER} \\
+  --enable-swagger-ui=true \\
+  --etcd-cafile=/var/lib/kubernetes/ca.pem \\
+  --etcd-certfile=/var/lib/kubernetes/kubernetes.pem \\
+  --etcd-keyfile=/var/lib/kubernetes/kubernetes-key.pem \\
+  --etcd-servers=https://10.240.0.10:2379,https://10.240.0.11:2379,https://10.240.0.12:2379 \\
+  --event-ttl=1h \\
+  --experimental-bootstrap-token-auth \\
+  --insecure-bind-address=0.0.0.0 \\
+  --kubelet-certificate-authority=/var/lib/kubernetes/ca.pem \\
+  --kubelet-client-certificate=/var/lib/kubernetes/kubernetes.pem \\
+  --kubelet-client-key=/var/lib/kubernetes/kubernetes-key.pem \\
+  --kubelet-https=true \\
+  --runtime-config=rbac.authorization.k8s.io/v1alpha1 \\
+  --service-account-key-file=/var/lib/kubernetes/kubernetes-key.pem \\
+  --service-cluster-ip-range=10.32.0.0/24 \\
+  --service-node-port-range=30000-32767 \\
+  --tls-cert-file=/var/lib/kubernetes/kubernetes.pem \\
+  --tls-private-key-file=/var/lib/kubernetes/kubernetes-key.pem \\
+  --token-auth-file=/var/lib/kubernetes/token.csv \\
   --v=2
 Restart=on-failure
 RestartSec=5
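
The switch from the quoted `<<"EOF"` heredoc to an unquoted `<<EOF`, together with the doubled backslashes, is what makes the old `sed` substitution steps unnecessary: with an unquoted delimiter the shell expands `${INTERNAL_IP}` and `${CLOUD_PROVIDER}` while the heredoc is read, and each `\\` collapses to the single `\` line continuation that systemd expects. A minimal sketch of the behavior:

```
INTERNAL_IP=10.240.0.10
cat <<EOF
--advertise-address=${INTERNAL_IP} \\
EOF
# prints: --advertise-address=10.240.0.10 \
```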
@@ -176,17 +224,13 @@ RestartSec=5
 WantedBy=multi-user.target
 EOF
 ```
-- Note: If you are deploying this on AWS then you should add ``--cloud-provider=aws`` in the ``kube-apiserver.service`` unit file's [service] section. If you are adding this before ``--v=2`` line, remember to add ``\`` character at the end

-```
-sed -i s/INTERNAL_IP/$INTERNAL_IP/g kube-apiserver.service
-```
+Start the `kube-apiserver` service:

 ```
 sudo mv kube-apiserver.service /etc/systemd/system/
 ```

 ```
 sudo systemctl daemon-reload
 sudo systemctl enable kube-apiserver
@@ -200,21 +244,25 @@ sudo systemctl status kube-apiserver --no-pager
 ### Kubernetes Controller Manager

 ```
-cat > kube-controller-manager.service <<"EOF"
+cat > kube-controller-manager.service <<EOF
 [Unit]
 Description=Kubernetes Controller Manager
 Documentation=https://github.com/GoogleCloudPlatform/kubernetes

 [Service]
-ExecStart=/usr/bin/kube-controller-manager \
-  --allocate-node-cidrs=true \
-  --cluster-cidr=10.200.0.0/16 \
-  --cluster-name=kubernetes \
-  --leader-elect=true \
-  --master=http://INTERNAL_IP:8080 \
-  --root-ca-file=/var/lib/kubernetes/ca.pem \
-  --service-account-private-key-file=/var/lib/kubernetes/kubernetes-key.pem \
-  --service-cluster-ip-range=10.32.0.0/16 \
+ExecStart=/usr/bin/kube-controller-manager \\
+  --address=0.0.0.0 \\
+  --allocate-node-cidrs=true \\
+  --cloud-provider=${CLOUD_PROVIDER} \\
+  --cluster-cidr=10.200.0.0/16 \\
+  --cluster-name=kubernetes \\
+  --cluster-signing-cert-file="/var/lib/kubernetes/ca.pem" \\
+  --cluster-signing-key-file="/var/lib/kubernetes/ca-key.pem" \\
+  --leader-elect=true \\
+  --master=http://${INTERNAL_IP}:8080 \\
+  --root-ca-file=/var/lib/kubernetes/ca.pem \\
+  --service-account-private-key-file=/var/lib/kubernetes/ca-key.pem \\
+  --service-cluster-ip-range=10.32.0.0/16 \\
   --v=2
 Restart=on-failure
 RestartSec=5
@@ -223,18 +271,13 @@ RestartSec=5
 WantedBy=multi-user.target
 EOF
 ```
-- Note: If you are deploying this on AWS then you should add ``--cloud-provider=aws`` in the ``kube-controller-manager.service`` unit file's [service] section. If you are adding this before ``--v=2`` line , remember to add ``\`` character at the end.

-```
-sed -i s/INTERNAL_IP/$INTERNAL_IP/g kube-controller-manager.service
-```
+Start the `kube-controller-manager` service:

 ```
 sudo mv kube-controller-manager.service /etc/systemd/system/
 ```

 ```
 sudo systemctl daemon-reload
 sudo systemctl enable kube-controller-manager
@@ -248,15 +291,15 @@ sudo systemctl status kube-controller-manager --no-pager
 ### Kubernetes Scheduler

 ```
-cat > kube-scheduler.service <<"EOF"
+cat > kube-scheduler.service <<EOF
 [Unit]
 Description=Kubernetes Scheduler
 Documentation=https://github.com/GoogleCloudPlatform/kubernetes

 [Service]
-ExecStart=/usr/bin/kube-scheduler \
-  --leader-elect=true \
-  --master=http://INTERNAL_IP:8080 \
+ExecStart=/usr/bin/kube-scheduler \\
+  --leader-elect=true \\
+  --master=http://${INTERNAL_IP}:8080 \\
   --v=2
 Restart=on-failure
 RestartSec=5
@@ -266,9 +309,7 @@ WantedBy=multi-user.target
 EOF
 ```

-```
-sed -i s/INTERNAL_IP/$INTERNAL_IP/g kube-scheduler.service
-```
+Start the `kube-scheduler` service:

 ```
 sudo mv kube-scheduler.service /etc/systemd/system/
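
Once the API server, controller manager, and scheduler are all enabled and running, a quick health check from a controller node confirms the three components can reach each other and etcd:

```
kubectl get componentstatuses
```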
@@ -343,3 +384,13 @@ aws elb register-instances-with-load-balancer \
   --load-balancer-name kubernetes \
   --instances ${CONTROLLER_0_INSTANCE_ID} ${CONTROLLER_1_INSTANCE_ID} ${CONTROLLER_2_INSTANCE_ID}
 ```
+
+## RBAC
+
+Set up bootstrapping roles:
+
+```
+kubectl create clusterrolebinding kubelet-bootstrap \
+  --clusterrole=system:node-bootstrapper \
+  --user=kubelet-bootstrap
+```
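
The binding can be verified afterwards; its subjects should include the `kubelet-bootstrap` user from `token.csv`:

```
kubectl get clusterrolebinding kubelet-bootstrap -o yaml
```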
@@ -20,6 +20,33 @@ Some people would like to run workers and cluster services anywhere in the clust

 Run the following commands on `worker0`, `worker1`, `worker2`:

+### Set the Kubernetes Public Address
+
+#### GCE
+
+```
+KUBERNETES_PUBLIC_ADDRESS=$(gcloud compute addresses describe kubernetes \
+  --format 'value(address)')
+```
+
+#### AWS
+
+```
+KUBERNETES_PUBLIC_ADDRESS=$(aws elb describe-load-balancers \
+  --load-balancer-name kubernetes | \
+  jq -r '.LoadBalancerDescriptions[].DNSName')
+```
+
+---
+
+```
+sudo mkdir -p /var/lib/kubelet
+```
+
+```
+sudo mv bootstrap.kubeconfig /var/lib/kubelet
+```
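
The address resolved above gets baked into the kubelet and kube-proxy unit files later in this lab, so it is worth confirming the lookup returned something before continuing:

```
echo ${KUBERNETES_PUBLIC_ADDRESS}
```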

 #### Move the TLS certificates in place

 ```
@@ -27,19 +54,17 @@ sudo mkdir -p /var/lib/kubernetes
 ```

 ```
-sudo cp ca.pem kubernetes-key.pem kubernetes.pem /var/lib/kubernetes/
+sudo mv ca.pem /var/lib/kubernetes/
 ```

 #### Docker

 Kubernetes should be compatible with Docker 1.9.x - 1.12.x:

 ```
-wget https://get.docker.com/builds/Linux/x86_64/docker-1.12.1.tgz
+wget https://get.docker.com/builds/Linux/x86_64/docker-1.12.6.tgz
 ```

 ```
-tar -xvf docker-1.12.1.tgz
+tar -xvf docker-1.12.6.tgz
 ```

 ```
@@ -50,22 +75,28 @@ Create the Docker systemd unit file:

 ```
-sudo sh -c 'echo "[Unit]
+cat > docker.service <<EOF
+[Unit]
 Description=Docker Application Container Engine
 Documentation=http://docs.docker.io

 [Service]
-ExecStart=/usr/bin/docker daemon \
-  --iptables=false \
-  --ip-masq=false \
-  --host=unix:///var/run/docker.sock \
-  --log-level=error \
+ExecStart=/usr/bin/docker daemon \\
+  --iptables=false \\
+  --ip-masq=false \\
+  --host=unix:///var/run/docker.sock \\
+  --log-level=error \\
   --storage-driver=overlay
 Restart=on-failure
 RestartSec=5

 [Install]
-WantedBy=multi-user.target" > /etc/systemd/system/docker.service'
+WantedBy=multi-user.target
+EOF
 ```

+```
+sudo mv docker.service /etc/systemd/system/docker.service
+```
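
Once the service is enabled and started (the `systemctl` steps follow), the client and daemon versions can be compared to confirm the 1.12.6 build is the one actually running:

```
sudo docker version
```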
|
||||
```
|
||||
|
@@ -90,24 +121,24 @@ sudo mkdir -p /opt/cni
 ```

 ```
-wget https://storage.googleapis.com/kubernetes-release/network-plugins/cni-07a8a28637e97b22eb8dfe710eeae1344f69d16e.tar.gz
+wget https://storage.googleapis.com/kubernetes-release/network-plugins/cni-amd64-0799f5732f2a11b329d9e3d51b9c8f2e3759f2ff.tar.gz
 ```

 ```
-sudo tar -xvf cni-07a8a28637e97b22eb8dfe710eeae1344f69d16e.tar.gz -C /opt/cni
+sudo tar -xvf cni-amd64-0799f5732f2a11b329d9e3d51b9c8f2e3759f2ff.tar.gz -C /opt/cni
 ```

 Download and install the Kubernetes worker binaries:

 ```
-wget https://storage.googleapis.com/kubernetes-release/release/v1.5.1/bin/linux/amd64/kubectl
+wget https://storage.googleapis.com/kubernetes-release/release/v1.6.0-beta.4/bin/linux/amd64/kubectl
 ```
 ```
-wget https://storage.googleapis.com/kubernetes-release/release/v1.5.1/bin/linux/amd64/kube-proxy
+wget https://storage.googleapis.com/kubernetes-release/release/v1.6.0-beta.4/bin/linux/amd64/kube-proxy
 ```
 ```
-wget https://storage.googleapis.com/kubernetes-release/release/v1.5.1/bin/linux/amd64/kubelet
+wget https://storage.googleapis.com/kubernetes-release/release/v1.6.0-beta.4/bin/linux/amd64/kubelet
 ```

 ```
@@ -118,61 +149,42 @@ chmod +x kubectl kube-proxy kubelet
 sudo mv kubectl kube-proxy kubelet /usr/bin/
 ```

-```
-sudo mkdir -p /var/lib/kubelet/
-```
-
-```
-sudo sh -c 'echo "apiVersion: v1
-kind: Config
-clusters:
-- cluster:
-    certificate-authority: /var/lib/kubernetes/ca.pem
-    server: https://10.240.0.10:6443
-  name: kubernetes
-contexts:
-- context:
-    cluster: kubernetes
-    user: kubelet
-  name: kubelet
-current-context: kubelet
-users:
-- name: kubelet
-  user:
-    token: chAng3m3" > /var/lib/kubelet/kubeconfig'
-```
-
 Create the kubelet systemd unit file:

 ```
-sudo sh -c 'echo "[Unit]
+cat > kubelet.service <<EOF
+[Unit]
 Description=Kubernetes Kubelet
 Documentation=https://github.com/GoogleCloudPlatform/kubernetes
 After=docker.service
 Requires=docker.service

 [Service]
-ExecStart=/usr/bin/kubelet \
-  --allow-privileged=true \
-  --api-servers=https://10.240.0.10:6443,https://10.240.0.11:6443,https://10.240.0.12:6443 \
-  --cloud-provider= \
-  --cluster-dns=10.32.0.10 \
-  --cluster-domain=cluster.local \
-  --container-runtime=docker \
-  --docker=unix:///var/run/docker.sock \
-  --network-plugin=kubenet \
-  --kubeconfig=/var/lib/kubelet/kubeconfig \
-  --reconcile-cidr=true \
-  --serialize-image-pulls=false \
-  --tls-cert-file=/var/lib/kubernetes/kubernetes.pem \
-  --tls-private-key-file=/var/lib/kubernetes/kubernetes-key.pem \
+ExecStart=/usr/bin/kubelet \\
+  --api-servers=https://${KUBERNETES_PUBLIC_ADDRESS}:6443 \\
+  --allow-privileged=true \\
+  --cloud-provider=auto-detect \\
+  --cluster-dns=10.32.0.10 \\
+  --cluster-domain=cluster.local \\
+  --container-runtime=docker \\
+  --experimental-bootstrap-kubeconfig=/var/lib/kubelet/bootstrap.kubeconfig \\
+  --network-plugin=kubenet \\
+  --kubeconfig=/var/lib/kubelet/kubeconfig \\
+  --serialize-image-pulls=false \\
+  --register-node=true \\
+  --tls-cert-file=/var/run/kubernetes/kubelet-client.crt \\
+  --tls-private-key-file=/var/run/kubernetes/kubelet-client.key \\
   --v=2

 Restart=on-failure
 RestartSec=5

 [Install]
-WantedBy=multi-user.target" > /etc/systemd/system/kubelet.service'
+WantedBy=multi-user.target
+EOF
 ```

+```
+sudo mv kubelet.service /etc/systemd/system/kubelet.service
+```
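
One operational consequence of `--experimental-bootstrap-kubeconfig` that this diff does not show: each kubelet submits a certificate signing request on first contact, and a node only joins once its CSR is approved. With admin credentials that would look something like the following (the CSR names are generated, so list them first):

```
kubectl get csr
kubectl certificate approve <csr-name>
```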

 ```
@@ -190,22 +202,26 @@ sudo systemctl status kubelet --no-pager

 ```
-sudo sh -c 'echo "[Unit]
+cat > kube-proxy.service <<EOF
+[Unit]
 Description=Kubernetes Kube Proxy
 Documentation=https://github.com/GoogleCloudPlatform/kubernetes

 [Service]
-ExecStart=/usr/bin/kube-proxy \
-  --master=https://10.240.0.10:6443 \
-  --kubeconfig=/var/lib/kubelet/kubeconfig \
-  --proxy-mode=iptables \
+ExecStart=/usr/bin/kube-proxy \\
+  --master=https://${KUBERNETES_PUBLIC_ADDRESS}:6443 \\
+  --kubeconfig=/var/lib/kubelet/kubeconfig \\
+  --proxy-mode=iptables \\
   --v=2

 Restart=on-failure
 RestartSec=5

 [Install]
-WantedBy=multi-user.target" > /etc/systemd/system/kube-proxy.service'
+WantedBy=multi-user.target
+EOF
 ```

+```
+sudo mv kube-proxy.service /etc/systemd/system/kube-proxy.service
+```
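
With the kubelet and kube-proxy running on all three workers, and any pending bootstrap CSRs approved, the nodes should register themselves; from a machine with admin credentials:

```
kubectl get nodes
```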

 ```
@@ -7,7 +7,7 @@ Run the following commands from the machine which will be your Kubernetes Client
 ### OS X

 ```
-wget https://storage.googleapis.com/kubernetes-release/release/v1.5.1/bin/darwin/amd64/kubectl
+wget https://storage.googleapis.com/kubernetes-release/release/v1.6.0-beta.4/bin/darwin/amd64/kubectl
 chmod +x kubectl
 sudo mv kubectl /usr/local/bin
 ```
@@ -15,7 +15,7 @@ sudo mv kubectl /usr/local/bin
 ### Linux

 ```
-wget https://storage.googleapis.com/kubernetes-release/release/v1.5.1/bin/linux/amd64/kubectl
+wget https://storage.googleapis.com/kubernetes-release/release/v1.6.0-beta.4/bin/linux/amd64/kubectl
 chmod +x kubectl
 sudo mv kubectl /usr/local/bin
 ```
@@ -40,13 +40,6 @@ KUBERNETES_PUBLIC_ADDRESS=$(aws elb describe-load-balancers \
 ```
 ---

-Recall the token we setup for the admin user:
-
-```
-# /var/lib/kubernetes/token.csv on the controller nodes
-chAng3m3,admin,admin
-```
-
 Also be sure to locate the CA certificate [created earlier](02-certificate-authority.md). Since we are using self-signed TLS certs we need to trust the CA certificate so we can verify the remote API Servers.

 ### Build up the kubeconfig entry
@@ -61,7 +54,9 @@ kubectl config set-cluster kubernetes-the-hard-way \
 ```

 ```
-kubectl config set-credentials admin --token chAng3m3
+kubectl config set-credentials admin \
+  --client-certificate=admin.pem \
+  --client-key=admin-key.pem
 ```
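
Once the context is created and selected (in the steps that follow this hunk), connectivity with the new certificate-based admin credentials can be verified end to end:

```
kubectl cluster-info
```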

 ```