Added scripts to automate setting up a cluster
The scripts support a custom number of controllers and workers, as well as a specific version of Kubernetes, by exporting NUM_CONTROLLERS, NUM_WORKERS, and KUBERNETES_VERSION respectively. This has only been tested with 3 controllers, 3 or more workers, and Kubernetes version 1.5.1.
parent 753e71bac8
commit e8f056a2ba
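A minimal invocation sketch, matching the tested configuration (the wrapper script's filename is not shown in the hunks below, so ./run.sh is a placeholder; the Kubernetes release download URLs used by the scripts expect a v-prefixed tag):

export NUM_CONTROLLERS=3
export NUM_WORKERS=3
export KUBERNETES_VERSION=v1.5.1   # v-prefixed tag, as expected by the release download URLs
./run.sh                           # placeholder name for the top-level wrapper script in this commit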
@@ -0,0 +1,171 @@
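# Controller bootstrap: installs kube-apiserver, kube-controller-manager, and kube-scheduler as systemd units on every controller node, then creates the HTTP health check, target pool, and forwarding rule that load-balance the API servers.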
#!/usr/bin/bash
set -x

if [[ -z ${NUM_CONTROLLERS} || -z ${NUM_WORKERS} || -z ${KUBERNETES_VERSION} ]]; then
echo "Must set NUM_CONTROLLERS, NUM_WORKERS and KUBERNETES_VERSION environment variables"
exit 1
fi

(( NUM_CONTROLLERS-- ))
(( NUM_WORKERS-- ))

for i in $(eval echo "{0..${NUM_CONTROLLERS}}"); do
ETCD_SERVERS="${ETCD_SERVERS}https://10.240.0.1${i}:2379,"
done

ETCD_SERVERS=$(echo ${ETCD_SERVERS} | sed 's/,$//')

for i in $(eval echo "{0..${NUM_CONTROLLERS}}"); do
gcloud compute ssh controller${i} --command "sudo mkdir -p /var/lib/kubernetes"

gcloud compute ssh controller${i} --command "sudo cp ca.pem kubernetes-key.pem kubernetes.pem /var/lib/kubernetes/"

gcloud compute ssh controller${i} --command "wget https://storage.googleapis.com/kubernetes-release/release/${KUBERNETES_VERSION}/bin/linux/amd64/kube-apiserver"

gcloud compute ssh controller${i} --command "wget https://storage.googleapis.com/kubernetes-release/release/${KUBERNETES_VERSION}/bin/linux/amd64/kube-controller-manager"

gcloud compute ssh controller${i} --command "wget https://storage.googleapis.com/kubernetes-release/release/${KUBERNETES_VERSION}/bin/linux/amd64/kube-scheduler"

gcloud compute ssh controller${i} --command "wget https://storage.googleapis.com/kubernetes-release/release/${KUBERNETES_VERSION}/bin/linux/amd64/kubectl"

gcloud compute ssh controller${i} --command "chmod +x kube-apiserver kube-controller-manager kube-scheduler kubectl"

gcloud compute ssh controller${i} --command "sudo mv kube-apiserver kube-controller-manager kube-scheduler kubectl /usr/bin/"

gcloud compute ssh controller${i} --command "wget https://raw.githubusercontent.com/kelseyhightower/kubernetes-the-hard-way/master/token.csv"

gcloud compute ssh controller${i} --command "cat token.csv"

gcloud compute ssh controller${i} --command "sudo mv token.csv /var/lib/kubernetes/"

gcloud compute ssh controller${i} --command "wget https://raw.githubusercontent.com/kelseyhightower/kubernetes-the-hard-way/master/authorization-policy.jsonl"

gcloud compute ssh controller${i} --command "cat authorization-policy.jsonl"

gcloud compute ssh controller${i} --command "sudo mv authorization-policy.jsonl /var/lib/kubernetes/"

INTERNAL_IP=$(gcloud compute ssh controller${i} --command 'curl -s -H "Metadata-Flavor: Google" http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/0/ip')

# kube-apiserver
gcloud compute ssh controller${i} --command "echo '[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
ExecStart=/usr/bin/kube-apiserver \
--admission-control=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota \
--advertise-address=${INTERNAL_IP} \
--allow-privileged=true \
--apiserver-count=3 \
--authorization-mode=ABAC \
--authorization-policy-file=/var/lib/kubernetes/authorization-policy.jsonl \
--bind-address=0.0.0.0 \
--enable-swagger-ui=true \
--etcd-cafile=/var/lib/kubernetes/ca.pem \
--insecure-bind-address=0.0.0.0 \
--kubelet-certificate-authority=/var/lib/kubernetes/ca.pem \
--etcd-servers=${ETCD_SERVERS} \
--service-account-key-file=/var/lib/kubernetes/kubernetes-key.pem \
--service-cluster-ip-range=10.32.0.0/24 \
--service-node-port-range=30000-32767 \
--tls-cert-file=/var/lib/kubernetes/kubernetes.pem \
--tls-private-key-file=/var/lib/kubernetes/kubernetes-key.pem \
--token-auth-file=/var/lib/kubernetes/token.csv \
--v=2
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target' > kube-apiserver.service"


#gcloud compute ssh controller${i} --command 'INTERNAL_IP=$(curl -s -H "Metadata-Flavor: Google" http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/0/ip); sed -i s/INTERNAL_IP/${INTERNAL_IP}/g kube-apiserver.service'
gcloud compute ssh controller${i} --command "sudo mv kube-apiserver.service /etc/systemd/system/"
gcloud compute ssh controller${i} --command "sudo systemctl daemon-reload"
gcloud compute ssh controller${i} --command "sudo systemctl enable kube-apiserver"
gcloud compute ssh controller${i} --command "sudo systemctl start kube-apiserver"
gcloud compute ssh controller${i} --command "sudo systemctl status kube-apiserver --no-pager"

# kube-controller-manager
#gcloud compute copy-files kube-controller-manager.service controller${i}:~/
gcloud compute ssh controller${i} --command "echo '[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
ExecStart=/usr/bin/kube-controller-manager \
--allocate-node-cidrs=true \
--cluster-cidr=10.200.0.0/16 \
--cluster-name=kubernetes \
--leader-elect=true \
--master=http://${INTERNAL_IP}:8080 \
--root-ca-file=/var/lib/kubernetes/ca.pem \
--service-account-private-key-file=/var/lib/kubernetes/kubernetes-key.pem \
--service-cluster-ip-range=10.32.0.0/24 \
--v=2
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target' > kube-controller-manager.service"

#gcloud compute ssh controller${i} --command 'INTERNAL_IP=$(curl -s -H "Metadata-Flavor: Google" http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/0/ip); sed -i s/INTERNAL_IP/${INTERNAL_IP}/g kube-controller-manager.service'
gcloud compute ssh controller${i} --command "sudo mv kube-controller-manager.service /etc/systemd/system/"
gcloud compute ssh controller${i} --command "sudo systemctl daemon-reload"
gcloud compute ssh controller${i} --command "sudo systemctl enable kube-controller-manager"
gcloud compute ssh controller${i} --command "sudo systemctl start kube-controller-manager"
gcloud compute ssh controller${i} --command "sudo systemctl status kube-controller-manager --no-pager"

# kube-scheduler
#gcloud compute copy-files kube-scheduler.service controller${i}:~/
gcloud compute ssh controller${i} --command "echo '[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
ExecStart=/usr/bin/kube-scheduler \
--leader-elect=true \
--master=http://${INTERNAL_IP}:8080 \
--v=2
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target' > kube-scheduler.service"

#gcloud compute ssh controller${i} --command 'INTERNAL_IP=$(curl -s -H "Metadata-Flavor: Google" http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/0/ip); sed -i s/INTERNAL_IP/${INTERNAL_IP}/g kube-scheduler.service'
gcloud compute ssh controller${i} --command "sudo mv kube-scheduler.service /etc/systemd/system/"
gcloud compute ssh controller${i} --command "sudo systemctl daemon-reload"
gcloud compute ssh controller${i} --command "sudo systemctl enable kube-scheduler"
gcloud compute ssh controller${i} --command "sudo systemctl start kube-scheduler"
gcloud compute ssh controller${i} --command "sudo systemctl status kube-scheduler --no-pager"

# Verify components
gcloud compute ssh controller${i} --command "kubectl get componentstatuses"
done

gcloud compute http-health-checks create kube-apiserver-check \
--description "Kubernetes API Server Health Check" \
--port 8080 \
--request-path /healthz

gcloud compute target-pools create kubernetes-pool \
--http-health-check=kube-apiserver-check

for i in $(eval echo "{0..${NUM_CONTROLLERS}}"); do
hosts="${hosts}controller${i},"
done

hosts=$(echo ${hosts} | sed 's/,$//')

gcloud compute target-pools add-instances kubernetes-pool \
--instances ${hosts}

KUBERNETES_PUBLIC_ADDRESS=$(gcloud compute addresses describe kubernetes \
--format 'value(address)')

gcloud compute forwarding-rules create kubernetes-rule \
--address ${KUBERNETES_PUBLIC_ADDRESS} \
--ports 6443 \
--target-pool kubernetes-pool \
--region us-west1
@@ -0,0 +1,67 @@
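# etcd bootstrap: installs etcd v3.0.10 on every controller, writes an etcd systemd unit that uses the Kubernetes TLS certificates, starts the cluster, and runs an etcdctl cluster-health check.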
#!/usr/bin/bash
set -x

if [[ -z ${NUM_CONTROLLERS} || -z ${NUM_WORKERS} ]]; then
echo "Must set NUM_CONTROLLERS and NUM_WORKERS environment variables"
exit 1
fi

(( NUM_CONTROLLERS-- ))
(( NUM_WORKERS-- ))

for i in $(eval echo "{0..${NUM_CONTROLLERS}}"); do
INITIAL_CLUSTER="${INITIAL_CLUSTER}controller${i}=https://10.240.0.1${i}:2380,"
done

INITIAL_CLUSTER=$(echo ${INITIAL_CLUSTER} | sed 's/,$//')

for i in $(eval echo "{0..${NUM_CONTROLLERS}}"); do
gcloud compute ssh controller${i} --command "sudo mkdir -p /etc/etcd/"

gcloud compute ssh controller${i} --command "sudo cp ca.pem kubernetes-key.pem kubernetes.pem /etc/etcd/"

gcloud compute ssh controller${i} --command "wget https://github.com/coreos/etcd/releases/download/v3.0.10/etcd-v3.0.10-linux-amd64.tar.gz"

gcloud compute ssh controller${i} --command "tar -xvf etcd-v3.0.10-linux-amd64.tar.gz"

gcloud compute ssh controller${i} --command "sudo mv etcd-v3.0.10-linux-amd64/etcd* /usr/bin/"

gcloud compute ssh controller${i} --command "sudo mkdir -p /var/lib/etcd"

INTERNAL_IP=$(gcloud compute ssh controller${i} --command 'curl -s -H "Metadata-Flavor: Google" http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/0/ip')

gcloud compute ssh controller${i} --command "echo '[Unit]
Description=etcd
Documentation=https://github.com/coreos

[Service]
ExecStart=/usr/bin/etcd --name controller${i} \
--cert-file=/etc/etcd/kubernetes.pem \
--key-file=/etc/etcd/kubernetes-key.pem \
--peer-cert-file=/etc/etcd/kubernetes.pem \
--peer-key-file=/etc/etcd/kubernetes-key.pem \
--trusted-ca-file=/etc/etcd/ca.pem \
--peer-trusted-ca-file=/etc/etcd/ca.pem \
--initial-advertise-peer-urls https://${INTERNAL_IP}:2380 \
--listen-peer-urls https://${INTERNAL_IP}:2380 \
--listen-client-urls https://${INTERNAL_IP}:2379,http://127.0.0.1:2379 \
--advertise-client-urls https://${INTERNAL_IP}:2379 \
--initial-cluster-token etcd-cluster-0 \
--initial-cluster ${INITIAL_CLUSTER} \
--initial-cluster-state new \
--data-dir=/var/lib/etcd
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target' > etcd.service"

gcloud compute ssh controller${i} --command "cat etcd.service"
gcloud compute ssh controller${i} --command "sudo mv etcd.service /etc/systemd/system/"
gcloud compute ssh controller${i} --command "sudo systemctl daemon-reload"
gcloud compute ssh controller${i} --command "sudo systemctl enable etcd"
gcloud compute ssh controller${i} --command "sudo systemctl start etcd"
gcloud compute ssh controller${i} --command "sudo systemctl status etcd --no-pager"
done

gcloud compute ssh controller${i} --command "etcdctl --ca-file=/etc/etcd/ca.pem cluster-health"
@@ -0,0 +1,137 @@
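# Worker bootstrap: installs Docker 1.12.5, the CNI plugins, kubectl, kube-proxy, and kubelet on every worker, writes the kubelet kubeconfig, and starts docker, kubelet, and kube-proxy as systemd units.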
#!/usr/bin/bash
set -x

DOCKER_VERSION=1.12.5

if [[ -z ${NUM_CONTROLLERS} || -z ${NUM_WORKERS} || -z ${KUBERNETES_VERSION} ]]; then
echo "Must set NUM_CONTROLLERS, NUM_WORKERS and KUBERNETES_VERSION environment variables"
exit 1
fi

(( NUM_CONTROLLERS-- ))
(( NUM_WORKERS-- ))

for i in $(eval echo "{0..${NUM_CONTROLLERS}}"); do
API_SERVERS="${API_SERVERS}https://10.240.0.1${i}:6443,"
done

API_SERVERS=$(echo ${API_SERVERS} | sed 's/,$//')

for i in $(eval echo "{0..${NUM_WORKERS}}"); do
gcloud compute ssh worker${i} --command "sudo mkdir -p /var/lib/kubernetes"

gcloud compute ssh worker${i} --command "sudo cp ca.pem kubernetes-key.pem kubernetes.pem /var/lib/kubernetes/"

# docker
gcloud compute ssh worker${i} --command "wget https://get.docker.com/builds/Linux/x86_64/docker-${DOCKER_VERSION}.tgz"
gcloud compute ssh worker${i} --command "tar -xvf docker-${DOCKER_VERSION}.tgz"
gcloud compute ssh worker${i} --command "sudo cp docker/docker* /usr/bin/"
gcloud compute ssh worker${i} --command "sudo sh -c 'echo \"[Unit]
Description=Docker Application Container Engine
Documentation=http://docs.docker.io

[Service]
ExecStart=/usr/bin/docker daemon \
--iptables=false \
--ip-masq=false \
--host=unix:///var/run/docker.sock \
--log-level=error \
--storage-driver=overlay
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target\" > /etc/systemd/system/docker.service'"
gcloud compute ssh worker${i} --command "sudo systemctl daemon-reload"
gcloud compute ssh worker${i} --command "sudo systemctl enable docker"
gcloud compute ssh worker${i} --command "sudo systemctl start docker"
gcloud compute ssh worker${i} --command "sudo docker version"


# Download CNI and kubernetes components
gcloud compute ssh worker${i} --command "sudo mkdir -p /opt/cni"
gcloud compute ssh worker${i} --command "wget https://storage.googleapis.com/kubernetes-release/network-plugins/cni-07a8a28637e97b22eb8dfe710eeae1344f69d16e.tar.gz"
gcloud compute ssh worker${i} --command "sudo tar -xvf cni-07a8a28637e97b22eb8dfe710eeae1344f69d16e.tar.gz -C /opt/cni"
gcloud compute ssh worker${i} --command "wget https://storage.googleapis.com/kubernetes-release/release/${KUBERNETES_VERSION}/bin/linux/amd64/kubectl"
gcloud compute ssh worker${i} --command "wget https://storage.googleapis.com/kubernetes-release/release/${KUBERNETES_VERSION}/bin/linux/amd64/kube-proxy"
gcloud compute ssh worker${i} --command "wget https://storage.googleapis.com/kubernetes-release/release/${KUBERNETES_VERSION}/bin/linux/amd64/kubelet"
gcloud compute ssh worker${i} --command "chmod +x kubectl kube-proxy kubelet"
gcloud compute ssh worker${i} --command "sudo mv kubectl kube-proxy kubelet /usr/bin/"

# Setup kubelet and kube-proxy
gcloud compute ssh worker${i} --command "sudo mkdir -p /var/lib/kubelet/"

# kubelet
gcloud compute ssh worker${i} --command "sudo sh -c 'echo \"apiVersion: v1
kind: Config
clusters:
- cluster:
    certificate-authority: /var/lib/kubernetes/ca.pem
    server: https://10.240.0.10:6443
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: kubelet
  name: kubelet
current-context: kubelet
users:
- name: kubelet
  user:
    token: chAng3m3\" > /var/lib/kubelet/kubeconfig'"

gcloud compute ssh worker${i} --command "sudo sh -c 'echo \"[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=docker.service
Requires=docker.service

[Service]
ExecStart=/usr/bin/kubelet \
--allow-privileged=true \
--api-servers=${API_SERVERS} \
--cloud-provider= \
--cluster-dns=10.32.0.10 \
--cluster-domain=cluster.local \
--container-runtime=docker \
--docker=unix:///var/run/docker.sock \
--network-plugin=kubenet \
--kubeconfig=/var/lib/kubelet/kubeconfig \
--reconcile-cidr=true \
--serialize-image-pulls=false \
--tls-cert-file=/var/lib/kubernetes/kubernetes.pem \
--tls-private-key-file=/var/lib/kubernetes/kubernetes-key.pem \
--v=2

Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target\" > /etc/systemd/system/kubelet.service'"
gcloud compute ssh worker${i} --command "sudo systemctl daemon-reload"
gcloud compute ssh worker${i} --command "sudo systemctl enable kubelet"
gcloud compute ssh worker${i} --command "sudo systemctl start kubelet"
gcloud compute ssh worker${i} --command "sudo systemctl status kubelet --no-pager"

# kube-proxy
gcloud compute ssh worker${i} --command "sudo sh -c 'echo \"[Unit]
Description=Kubernetes Kube Proxy
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
ExecStart=/usr/bin/kube-proxy \
--master=https://10.240.0.10:6443 \
--kubeconfig=/var/lib/kubelet/kubeconfig \
--proxy-mode=iptables \
--v=2

Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target\" > /etc/systemd/system/kube-proxy.service'"
gcloud compute ssh worker${i} --command "sudo systemctl daemon-reload"
gcloud compute ssh worker${i} --command "sudo systemctl enable kube-proxy"
gcloud compute ssh worker${i} --command "sudo systemctl start kube-proxy"
gcloud compute ssh worker${i} --command "sudo systemctl status kube-proxy --no-pager"
done
@@ -0,0 +1,46 @@
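# Teardown: deletes the instances, forwarding rule, target pool, health check, static address, firewall rules, pod routes, subnet, and network created by the other scripts.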
#!/usr/bin/bash
set -x

if [[ -z ${NUM_CONTROLLERS} || -z ${NUM_WORKERS} ]]; then
echo "Must set NUM_CONTROLLERS and NUM_WORKERS env vars"
exit 1
fi

(( NUM_CONTROLLERS-- ))
(( NUM_WORKERS-- ))

for i in $(eval echo "{0..${NUM_CONTROLLERS}}"); do
hosts="${hosts}controller${i} "
done

for i in $(eval echo "{0..${NUM_WORKERS}}"); do
hosts="${hosts}worker${i} "
done

gcloud -q compute instances delete ${hosts}

gcloud -q compute forwarding-rules delete kubernetes-rule --region us-west1

gcloud -q compute target-pools delete kubernetes-pool

gcloud -q compute http-health-checks delete kube-apiserver-check

gcloud -q compute addresses delete kubernetes

gcloud -q compute firewall-rules delete \
kubernetes-allow-api-server \
kubernetes-allow-healthz \
kubernetes-allow-icmp \
kubernetes-allow-internal \
kubernetes-allow-rdp \
kubernetes-allow-ssh \
kubernetes-nginx-service

for i in $(eval echo "{0..${NUM_WORKERS}}"); do
gcloud -q compute routes delete kubernetes-route-10-200-${i}-0-24
done

gcloud -q compute networks subnets delete kubernetes

gcloud -q compute networks delete kubernetes
@@ -0,0 +1,17 @@
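# Pod routes: creates one GCP route per worker, sending each 10.200.<i>.0/24 pod CIDR to that worker's 10.240.0.2<i> address.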
#!/usr/bin/bash
set -x

if [[ -z ${NUM_CONTROLLERS} || -z ${NUM_WORKERS} ]]; then
echo "Must set NUM_CONTROLLERS and NUM_WORKERS environment variables"
exit 1
fi

(( NUM_CONTROLLERS-- ))
(( NUM_WORKERS-- ))

for i in $(eval echo "{0..${NUM_WORKERS}}"); do
gcloud compute routes create kubernetes-route-10-200-${i}-0-24 \
--network kubernetes \
--next-hop-address 10.240.0.2${i} \
--destination-range 10.200.${i}.0/24
done
@@ -0,0 +1,10 @@
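# Cluster DNS: creates the kubedns service and deployment from kubernetes-the-hard-way and prints their status.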
#!/usr/bin/bash
set -x

kubectl create -f https://raw.githubusercontent.com/kelseyhightower/kubernetes-the-hard-way/master/services/kubedns.yaml

kubectl --namespace=kube-system get svc

kubectl create -f https://raw.githubusercontent.com/kelseyhightower/kubernetes-the-hard-way/master/deployments/kubedns.yaml

kubectl --namespace=kube-system get pods
@@ -0,0 +1,18 @@
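# Top-level wrapper: runs the other scripts in order (infrastructure, CA, etcd, controllers, workers, remote kubectl access, routes, DNS, smoke test); cleanup stays commented out.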
#!/usr/bin/bash
set -x

if [[ -z ${NUM_CONTROLLERS} || -z ${NUM_WORKERS} || -z ${KUBERNETES_VERSION} ]]; then
echo "Must set NUM_CONTROLLERS, NUM_WORKERS and KUBERNETES_VERSION environment variables"
exit 1
fi

./start-infra-gcp.sh
./setup-ca.sh
./bootstrap-etcd.sh
./bootstrap-controllers.sh
./bootstrap-workers.sh
./kubectl-remote-access.sh
./create-routes.sh
./deploy-dns.sh
./smoke-test.sh
#./cleanup.sh
@@ -0,0 +1,21 @@
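# Remote kubectl access: points the local kubeconfig at the cluster's public address using the generated CA and the admin token, then checks component and node status.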
#!/usr/bin/bash
set -x

KUBERNETES_PUBLIC_ADDRESS=$(gcloud compute addresses describe kubernetes \
--format 'value(address)')

kubectl config set-cluster kubernetes-the-hard-way \
--certificate-authority=ca.pem \
--embed-certs=true \
--server=https://${KUBERNETES_PUBLIC_ADDRESS}:6443

kubectl config set-credentials admin --token chAng3m3

kubectl config set-context default-context \
--cluster=kubernetes-the-hard-way \
--user=admin \
--namespace=""

kubectl config use-context default-context
kubectl get componentstatuses
kubectl get nodes
@@ -0,0 +1,117 @@
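# CA and TLS setup: generates a CA with cfssl, builds kubernetes-csr.json with every controller, worker, the 10.32.0.1 service IP, and the public address in its hosts list, signs the kubernetes certificate, and copies the certs to all nodes.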
#!/usr/bin/bash
set -x

if [[ -z ${NUM_CONTROLLERS} || -z ${NUM_WORKERS} ]]; then
echo "Must set NUM_CONTROLLERS and NUM_WORKERS environment variables"
exit 1
fi

(( NUM_CONTROLLERS-- ))
(( NUM_WORKERS-- ))

echo '{
"signing": {
"default": {
"expiry": "8760h"
},
"profiles": {
"kubernetes": {
"usages": ["signing", "key encipherment", "server auth", "client auth"],
"expiry": "8760h"
}
}
}
}' > ca-config.json

echo '{
"CN": "Kubernetes",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "US",
"L": "Portland",
"O": "Kubernetes",
"OU": "CA",
"ST": "Oregon"
}
]
}' > ca-csr.json

cfssl gencert -initca ca-csr.json | cfssljson -bare ca

# Verify
openssl x509 -in ca.pem -text -noout

KUBERNETES_PUBLIC_ADDRESS=$(gcloud compute addresses describe kubernetes \
--format 'value(address)')

# Order is inefficient but set up to match original example
for i in $(eval echo "{0..${NUM_WORKERS}}"); do
hosts="${hosts}\t\"worker${i}\",\n"
done

for i in $(eval echo "{0..${NUM_WORKERS}}"); do
hosts="${hosts}\t\"ip-10-240-0-2${i}\",\n"
done

hosts="${hosts}\t\"10.32.0.1\",\n"

for i in $(eval echo "{0..${NUM_CONTROLLERS}}"); do
hosts="${hosts}\t\"10.240.0.1${i}\",\n"
done

for i in $(eval echo "{0..${NUM_WORKERS}}"); do
hosts="${hosts}\t\"10.240.0.2${i}\",\n"
done

cat > kubernetes-csr.json <<EOF
{
"CN": "kubernetes",
"hosts": [
EOF

echo -en ${hosts} | sed 's/\t/ /' >> kubernetes-csr.json

cat >> kubernetes-csr.json <<EOF
"${KUBERNETES_PUBLIC_ADDRESS}",
"127.0.0.1"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "US",
"L": "Portland",
"O": "Kubernetes",
"OU": "Cluster",
"ST": "Oregon"
}
]
}
EOF

cfssl gencert \
-ca=ca.pem \
-ca-key=ca-key.pem \
-config=ca-config.json \
-profile=kubernetes \
kubernetes-csr.json | cfssljson -bare kubernetes

openssl x509 -in kubernetes.pem -text -noout

for i in $(eval echo "{0..${NUM_CONTROLLERS}}"); do
kube_hosts="${kube_hosts}controller${i} "
done

for i in $(eval echo "{0..${NUM_WORKERS}}"); do
kube_hosts="${kube_hosts}worker${i} "
done

for host in ${kube_hosts}; do
gcloud compute copy-files ca.pem kubernetes-key.pem kubernetes.pem ${host}:~/
done
@@ -0,0 +1,21 @@
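# Smoke test: deploys a 3-replica nginx, exposes it as a NodePort service, opens a firewall rule for the node port, and curls it via worker0's public IP.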
#!/usr/bin/bash
set -x

kubectl run nginx --image=nginx --port=80 --replicas=3

kubectl get pods -o wide

sleep 10

kubectl expose deployment nginx --type NodePort

NODE_PORT=$(kubectl get svc nginx --output=jsonpath='{range .spec.ports[0]}{.nodePort}')

gcloud compute firewall-rules create kubernetes-nginx-service \
--allow=tcp:${NODE_PORT} \
--network kubernetes

NODE_PUBLIC_IP=$(gcloud compute instances describe worker0 \
--format 'value(networkInterfaces[0].accessConfigs[0].natIP)')

curl http://${NODE_PUBLIC_IP}:${NODE_PORT}
@@ -0,0 +1,76 @@
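# GCP infrastructure: creates the kubernetes network and 10.240.0.0/24 subnet, firewall rules, a static address in us-west1, and the controller and worker instances (Ubuntu 16.04 with IP forwarding enabled).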
#!/usr/bin/bash
set -x

if [[ -z ${NUM_CONTROLLERS} || -z ${NUM_WORKERS} ]]; then
echo "Must set NUM_CONTROLLERS and NUM_WORKERS env vars"
exit 1
fi

(( NUM_CONTROLLERS-- ))
(( NUM_WORKERS-- ))

gcloud compute networks create kubernetes --mode custom

gcloud compute networks subnets create kubernetes \
--network kubernetes \
--range 10.240.0.0/24

gcloud compute firewall-rules create kubernetes-allow-icmp \
--allow icmp \
--network kubernetes \
--source-ranges 0.0.0.0/0

gcloud compute firewall-rules create kubernetes-allow-internal \
--allow tcp:0-65535,udp:0-65535,icmp \
--network kubernetes \
--source-ranges 10.240.0.0/24

gcloud compute firewall-rules create kubernetes-allow-rdp \
--allow tcp:3389 \
--network kubernetes \
--source-ranges 0.0.0.0/0

gcloud compute firewall-rules create kubernetes-allow-ssh \
--allow tcp:22 \
--network kubernetes \
--source-ranges 0.0.0.0/0

gcloud compute firewall-rules create kubernetes-allow-healthz \
--allow tcp:8080 \
--network kubernetes \
--source-ranges 130.211.0.0/22

gcloud compute firewall-rules create kubernetes-allow-api-server \
--allow tcp:6443 \
--network kubernetes \
--source-ranges 0.0.0.0/0

gcloud compute firewall-rules list --filter "network=kubernetes"

gcloud compute addresses create kubernetes --region=us-west1

gcloud compute addresses list kubernetes

# Kubernetes controller
for i in $(eval echo "{0..${NUM_CONTROLLERS}}"); do
gcloud compute instances create controller${i} \
--boot-disk-size 200GB \
--can-ip-forward \
--image ubuntu-1604-xenial-v20160921 \
--image-project ubuntu-os-cloud \
--machine-type n1-standard-1 \
--private-network-ip 10.240.0.1${i} \
--subnet kubernetes
done

# Kubernetes workers
for i in $(eval echo "{0..${NUM_WORKERS}}"); do
gcloud compute instances create worker${i} \
--boot-disk-size 200GB \
--can-ip-forward \
--image ubuntu-1604-xenial-v20160921 \
--image-project ubuntu-os-cloud \
--machine-type n1-standard-2 \
--private-network-ip 10.240.0.2${i} \
--subnet kubernetes
done