diff --git a/docs/03-compute-resources.md b/docs/03-compute-resources.md
index 463dbd0..7f22946 100644
--- a/docs/03-compute-resources.md
+++ b/docs/03-compute-resources.md
@@ -307,7 +307,7 @@ Let's build an SSH config file to easily be able to SSH to all our controller an
 ```
 for instance in controller-0 controller-1 controller-2 worker-0 worker-1 worker-2; do
-  EXTERNAL_IP=$(az vm show --show-details -g kubernetes-the-hard-way -n ${instance} --output tsv | cut -f19)
+  EXTERNAL_IP=$(az vm show --show-details -g kubernetes-the-hard-way -n ${instance} --query publicIps --output tsv)
   cat <
 > Remember to run the above commands on each controller node: `controller-0`, `controller-1`, and `controller-2`.
diff --git a/docs/08-bootstrapping-kubernetes-controllers.md b/docs/08-bootstrapping-kubernetes-controllers.md
index 6be2ce7..d20acbc 100644
--- a/docs/08-bootstrapping-kubernetes-controllers.md
+++ b/docs/08-bootstrapping-kubernetes-controllers.md
@@ -37,22 +37,18 @@ wget -q --show-progress --https-only --timestamping \
 Install the Kubernetes binaries:

 ```
-{
-  chmod +x kube-apiserver kube-controller-manager kube-scheduler kubectl
-  sudo mv kube-apiserver kube-controller-manager kube-scheduler kubectl /usr/local/bin/
-}
+chmod +x kube-apiserver kube-controller-manager kube-scheduler kubectl
+sudo mv kube-apiserver kube-controller-manager kube-scheduler kubectl /usr/local/bin/
 ```

 ### Configure the Kubernetes API Server

 ```
-{
 sudo mkdir -p /var/lib/kubernetes/

 sudo mv ca.pem ca-key.pem kubernetes-key.pem kubernetes.pem \
   service-account-key.pem service-account.pem \
   encryption-config.yaml /var/lib/kubernetes/
-}
 ```

 The instance internal IP address will be used to advertise the API Server to members of the cluster. Retrieve the internal IP address for the current compute instance:
@@ -190,11 +186,9 @@ EOF
 ### Start the Controller Services

 ```
-{
-  sudo systemctl daemon-reload
-  sudo systemctl enable kube-apiserver kube-controller-manager kube-scheduler
-  sudo systemctl start kube-apiserver kube-controller-manager kube-scheduler
-}
+sudo systemctl daemon-reload
+sudo systemctl enable kube-apiserver kube-controller-manager kube-scheduler
+sudo systemctl start kube-apiserver kube-controller-manager kube-scheduler
 ```

 > Allow up to 10 seconds for the Kubernetes API Server to fully initialize.
@@ -226,12 +220,11 @@ EOF
 ```

 ```
-{
-  sudo mv kubernetes.default.svc.cluster.local \
-    /etc/nginx/sites-available/kubernetes.default.svc.cluster.local
-  sudo ln -s /etc/nginx/sites-available/kubernetes.default.svc.cluster.local /etc/nginx/sites-enabled/
-}
+sudo mv kubernetes.default.svc.cluster.local \
+  /etc/nginx/sites-available/kubernetes.default.svc.cluster.local
+
+sudo ln -s /etc/nginx/sites-available/kubernetes.default.svc.cluster.local /etc/nginx/sites-enabled/
 ```

 ```
@@ -339,7 +332,7 @@ EOF
 Retrieve the `kubernetes-the-hard-way` static IP address:

 ```
-KUBERNETES_PUBLIC_ADDRESS=$(az network public-ip show -g kubernetes-the-hard-way -n kubernetes-the-hard-way-ip --output tsv | cut -f6)
+KUBERNETES_PUBLIC_ADDRESS=$(az network public-ip show -g kubernetes-the-hard-way -n kubernetes-the-hard-way-ip --query ipAddress --output tsv)
 ```

 Make a HTTP request for the Kubernetes version info:
diff --git a/docs/09-bootstrapping-kubernetes-workers.md b/docs/09-bootstrapping-kubernetes-workers.md
index ad9da62..2af062a 100644
--- a/docs/09-bootstrapping-kubernetes-workers.md
+++ b/docs/09-bootstrapping-kubernetes-workers.md
@@ -19,10 +19,8 @@ ssh worker-0
 Install the OS dependencies:

 ```
-{
-  sudo apt-get update
-  sudo apt-get -y install socat conntrack ipset
-}
+sudo apt-get update
+sudo apt-get -y install socat conntrack ipset
 ```

 > The socat binary enables support for the `kubectl port-forward` command.
@@ -56,7 +54,6 @@ sudo mkdir -p \
 Install the worker binaries:

 ```
-{
 sudo mv runsc-50c283b9f56bb7200938d9e207355f05f79f0d17 runsc
 sudo mv runc.amd64 runc
 chmod +x kubectl kube-proxy kubelet runc runsc
@@ -64,7 +61,6 @@ Install the worker binaries:
 sudo tar -xvf crictl-v1.12.0-linux-amd64.tar.gz -C /usr/local/bin/
 sudo tar -xvf cni-plugins-amd64-v0.6.0.tgz -C /opt/cni/bin/
 sudo tar -xvf containerd-1.2.0-rc.0.linux-amd64.tar.gz -C /
-}
 ```

 ### Configure CNI Networking
@@ -167,11 +163,9 @@ EOF
 ### Configure the Kubelet

 ```
-{
-  sudo mv ${HOSTNAME}-key.pem ${HOSTNAME}.pem /var/lib/kubelet/
-  sudo mv ${HOSTNAME}.kubeconfig /var/lib/kubelet/kubeconfig
-  sudo mv ca.pem /var/lib/kubernetes/
-}
+sudo mv ${HOSTNAME}-key.pem ${HOSTNAME}.pem /var/lib/kubelet/
+sudo mv ${HOSTNAME}.kubeconfig /var/lib/kubelet/kubeconfig
+sudo mv ca.pem /var/lib/kubernetes/
 ```

 Create the `kubelet-config.yaml` configuration file:
@@ -271,11 +265,9 @@ EOF
 ### Start the Worker Services

 ```
-{
-  sudo systemctl daemon-reload
-  sudo systemctl enable containerd kubelet kube-proxy
-  sudo systemctl start containerd kubelet kube-proxy
-}
+sudo systemctl daemon-reload
+sudo systemctl enable containerd kubelet kube-proxy
+sudo systemctl start containerd kubelet kube-proxy
 ```

 > Remember to run the above commands on each worker node: `worker-0`, `worker-1`, and `worker-2`.
diff --git a/docs/10-configuring-kubectl.md b/docs/10-configuring-kubectl.md
index 16ac6ec..baa9b95 100644
--- a/docs/10-configuring-kubectl.md
+++ b/docs/10-configuring-kubectl.md
@@ -11,24 +11,22 @@ Each kubeconfig requires a Kubernetes API Server to connect to. To support high
 Generate a kubeconfig file suitable for authenticating as the `admin` user:

 ```
-{
-  KUBERNETES_PUBLIC_ADDRESS=$(az network public-ip show -g kubernetes-the-hard-way -n kubernetes-the-hard-way-ip --output tsv | cut -f6)
+KUBERNETES_PUBLIC_ADDRESS=$(az network public-ip show -g kubernetes-the-hard-way -n kubernetes-the-hard-way-ip --query ipAddress --output tsv)

-  kubectl config set-cluster kubernetes-the-hard-way \
-    --certificate-authority=ca.pem \
-    --embed-certs=true \
-    --server=https://${KUBERNETES_PUBLIC_ADDRESS}:6443
+kubectl config set-cluster kubernetes-the-hard-way \
+  --certificate-authority=ca.pem \
+  --embed-certs=true \
+  --server=https://${KUBERNETES_PUBLIC_ADDRESS}:6443

-  kubectl config set-credentials admin \
-    --client-certificate=admin.pem \
-    --client-key=admin-key.pem
+kubectl config set-credentials admin \
+  --client-certificate=admin.pem \
+  --client-key=admin-key.pem

-  kubectl config set-context kubernetes-the-hard-way \
-    --cluster=kubernetes-the-hard-way \
-    --user=admin
+kubectl config set-context kubernetes-the-hard-way \
+  --cluster=kubernetes-the-hard-way \
+  --user=admin

-  kubectl config use-context kubernetes-the-hard-way
-}
+kubectl config use-context kubernetes-the-hard-way
 ```

 ## Verification
diff --git a/docs/13-smoke-test.md b/docs/13-smoke-test.md
index 8d69458..e865a01 100644
--- a/docs/13-smoke-test.md
+++ b/docs/13-smoke-test.md
@@ -16,8 +16,7 @@ kubectl create secret generic kubernetes-the-hard-way \
 Print a hexdump of the `kubernetes-the-hard-way` secret stored in etcd:

 ```
-EXTERNAL_IP=$(az vm show --show-details -g kubernetes-the-hard-way -n controller-0 --output tsv | cut -f19)
-ssh azureuser@${EXTERNAL_IP}
+ssh controller-0

 sudo ETCDCTL_API=3 etcdctl get \
   --endpoints=https://127.0.0.1:2379 \
@@ -195,7 +194,7 @@ az network nsg rule create \
 Retrieve the external IP address of a worker instance:

 ```
-EXTERNAL_IP=$(az vm show --show-details -g kubernetes-the-hard-way -n worker-0 --output tsv | cut -f19)
+EXTERNAL_IP=$(az vm show --show-details -g kubernetes-the-hard-way -n worker-0 --query publicIps --output tsv)
 ```

 Make an HTTP request using the external IP address and the `nginx` node port:
@@ -265,7 +264,7 @@ INSTANCE_NAME=$(kubectl get pod untrusted --output=jsonpath='{.spec.nodeName}')
 SSH into the worker node:

 ```
-EXTERNAL_IP=$(az vm show --show-details -g kubernetes-the-hard-way -n ${INSTANCE_NAME} --output tsv | cut -f19)
+EXTERNAL_IP=$(az vm show --show-details -g kubernetes-the-hard-way -n ${INSTANCE_NAME} --query publicIps --output tsv)
 ssh azureuser@${EXTERNAL_IP}
 ```
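The pattern repeated throughout this patch is the same everywhere: each `az` lookup that previously parsed positional TSV output (`--output tsv | cut -fN`) now asks for the exact field with a JMESPath `--query` (`publicIps`, `ipAddress`), so the commands no longer break when the CLI adds or reorders columns. The snippet below is a minimal, optional sketch for comparing the two forms on a single VM before relying on the new one; it reuses the tutorial's resource group and VM names (`kubernetes-the-hard-way`, `controller-0`) and assumes your installed `az` CLI still happens to emit the public IP in column 19 of the old TSV layout.

```
# Optional sanity check: compare the old positional parse with the new JMESPath query.
# Uses the tutorial's resource group / VM names; adjust if yours differ.
OLD_IP=$(az vm show --show-details -g kubernetes-the-hard-way -n controller-0 --output tsv | cut -f19)
NEW_IP=$(az vm show --show-details -g kubernetes-the-hard-way -n controller-0 --query publicIps --output tsv)

echo "cut -f19:          ${OLD_IP}"
echo "--query publicIps: ${NEW_IP}"

# If these differ, the TSV column order has drifted and the --query form is the one to trust.
[ "${OLD_IP}" = "${NEW_IP}" ] && echo "match" || echo "mismatch"
```

The same comparison applies to the `az network public-ip show` calls, where `--query ipAddress` replaces `cut -f6`.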