VTWO-14496 : continue configuring machine

pull/468/head
mbenabda 2019-06-20 20:56:42 +02:00
parent ee481cc7d4
commit 89fee4b7aa
12 changed files with 180 additions and 11 deletions

.gitignore vendored (5 lines changed)

@@ -1 +1,4 @@
.vagrant
kubernetes/
etcd*/
cluster.config

README.md

@@ -1,17 +1,62 @@
# Introduction
This repository is intended for demoing the manual installation of kubernetes components on both master and worker nodes.
It should be able to get you to a working single master (insecure) kubernetes setup on a set of VMs.
```plantuml
@startuml
database etcd [
  etcd
]
package "master-node" {
  [api-server] -> etcd
  [kubelet] --> [api-server] : watch
  [kubelet] --> [container-runtime] : run & watch
  [scheduler] --> [api-server] : watch
  [scheduler] --> [api-server] : apply
  [controller-manager] --> [api-server] : watch
  [controller-manager] --> [api-server] : apply
}
package "worker-node-1" {
  [kubelet ] --> [api-server] : watch
  [kubelet ] --> [container-runtime ] : run & watch
}
package "worker-node-2" {
  [kubelet ] --> [api-server] : watch
  [kubelet ] --> [container-runtime ] : run & watch
}
@enduml
```
# prerequisites
- vagrant
- the scp vagrant plugin : `vagrant plugin install vagrant-scp`
- [the GNU parallel CLI](https://www.gnu.org/software/parallel/)
- [jq](https://stedolan.github.io/jq/)
# setup
- run `vagrant up` to start the VMs. This will create a master node and 2 worker nodes on your host's network.
- run `./scripts/show_cluster_config | tee cluster.config` to generate the cluster configuration file (see the example output after this list)
- copy the cluster configuration to the nodes:
```sh
./scripts/copy_file_to_nodes cluster.config
```
- install the jq CLI on the nodes so they can read the config:
```sh
./scripts/run_script_on_nodes install_jq_cli
```
- setup a container runtime
@@ -21,5 +66,19 @@ vagrant up
- download kubernetes
```sh
./scripts/download_kubernetes_binaries $(cat cluster.config | jq -r ".kubernetes_version") ./kubernetes
```
- download etcd
```sh
./scripts/download_etcd_binaries $(cat cluster.config | jq -r ".etcd3_version") ./etcd3
```
- copy kubelet & kube-proxy to the worker nodes
```sh
./scripts/copy_file_to_nodes ./kubernetes/workers worker
```
- copy the kubelet, kube-proxy, apiserver, scheduler and controller-manager binaries, along with etcd, to the master node
```sh
./scripts/copy_file_to_nodes ./kubernetes/masters master
./scripts/copy_file_to_nodes ./etcd3 master
```
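For reference, with the defaults in `scripts/show_cluster_config` (further down in this commit), the generated `cluster.config` looks like this, and the download steps read the version fields out of it with `jq`:
```sh
$ ./scripts/show_cluster_config | tee cluster.config
{
  "kubernetes_version": "v1.15.0",
  "etcd3_version": "v3.3.13",
  "master": null,
  "workers": [
  ]
}
$ jq -r ".kubernetes_version" cluster.config
v1.15.0
```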

Vagrantfile vendored (2 lines changed)

@@ -8,6 +8,8 @@ Vagrant.configure("2") do |config|
  config.vm.box = "debian/stretch64"
  config.vm.box_version = "= 9.9.1"
  config.vm.network "private_network", type: "dhcp"
  # greet from every configured VM, revealing its hostname
  config.vm.provision "shell", inline: "echo Hello from \$HOSTNAME"
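With the private network in place, each box gets a DHCP-assigned address in addition to the NAT interface. A quick sketch of how to check what the nodes received (the node name is illustrative; `scripts/node_ip_addresses`, added later in this commit, automates the same thing):
```sh
# print all addresses of one box; the second one is the private_network address
vagrant ssh <node-name> -c "hostname -I"
# or list every node's private address via the helper script
./scripts/node_ip_addresses
```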

scripts/copy_file_to_nodes (new executable file, 17 lines)

@@ -0,0 +1,17 @@
#!/usr/bin/env bash
# copy a file to every vagrant node whose name matches the optional filter given as $2
SCRIPTS_DIR=$(dirname "$0")
FILE_TO_COPY=$(realpath "$1")
NODE_NAMES="$($SCRIPTS_DIR/node_names $2)"
echo "will copy $FILE_TO_COPY to nodes $(echo $NODE_NAMES | xargs)"
echo ""
echo ""
echo ""
echo ""
read -n 1 -s -r -p "Press any key to continue..."
echo ""
echo ""
# fan the copy out to all selected nodes in parallel (requires the vagrant-scp plugin)
parallel vagrant scp "$FILE_TO_COPY" {}:~/ ::: $NODE_NAMES
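Typical invocations, matching how the README uses it (the second argument is an optional node-name filter):
```sh
# copy to every node
./scripts/copy_file_to_nodes cluster.config
# copy only to nodes whose name matches "worker"
./scripts/copy_file_to_nodes ./kubernetes/workers worker
```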

scripts/download_etcd_binaries (new executable file, 18 lines)

@@ -0,0 +1,18 @@
#!/usr/bin/env bash
# download an etcd release and drop the etcd & etcdctl binaries into the given output directory
ETCD3_RELEASE_VERSION=$1
OUTPUT_DIR=$(realpath "$2")
tmp_dir=$(mktemp -d)
pushd "$tmp_dir" &> /dev/null
# the vagrant boxes are debian/stretch64 (amd64), so fetch the amd64 build
curl -sL "https://github.com/etcd-io/etcd/releases/download/$ETCD3_RELEASE_VERSION/etcd-$ETCD3_RELEASE_VERSION-linux-amd64.tar.gz" | tar -zxf -
mkdir -p "$OUTPUT_DIR"
mv "etcd-$ETCD3_RELEASE_VERSION-linux-amd64/etcd" "$OUTPUT_DIR/"
mv "etcd-$ETCD3_RELEASE_VERSION-linux-amd64/etcdctl" "$OUTPUT_DIR/"
popd &> /dev/null
rm -rf "$tmp_dir"
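For reference, a run with the etcd version from `cluster.config` (v3.3.13 by default) leaves the two binaries in the output directory:
```sh
./scripts/download_etcd_binaries v3.3.13 ./etcd3
ls ./etcd3
# etcd  etcdctl
```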

scripts/download_kubernetes_binaries (new executable file)

@@ -0,0 +1,30 @@
#!/usr/bin/env bash
# extract the hyperkube binary for the given release (e.g. v1.15.0)
# and lay out one copy per component under $OUTPUT_DIR/{workers,masters}
K8S_RELEASE_VERSION=$1
OUTPUT_DIR=$(realpath "$2")
tmp_dir=$(mktemp -d)
pushd "$tmp_dir" &> /dev/null
# hyperkube is a single all-in-one binary; pull it out of the image without running a container
container_id=$(docker create gcr.io/google-containers/hyperkube:$K8S_RELEASE_VERSION)
docker cp $container_id:/hyperkube ./hyperkube
docker rm -f $container_id &> /dev/null
# worker components
mkdir -p "$OUTPUT_DIR/workers"
cp hyperkube "$OUTPUT_DIR/workers/kubelet"
cp hyperkube "$OUTPUT_DIR/workers/proxy"
cp hyperkube "$OUTPUT_DIR/workers/kubectl"
# master components
mkdir -p "$OUTPUT_DIR/masters"
cp hyperkube "$OUTPUT_DIR/masters/kubelet"
cp hyperkube "$OUTPUT_DIR/masters/proxy"
cp hyperkube "$OUTPUT_DIR/masters/kubectl"
cp hyperkube "$OUTPUT_DIR/masters/scheduler"
cp hyperkube "$OUTPUT_DIR/masters/controller-manager"
cp hyperkube "$OUTPUT_DIR/masters/cloud-controller-manager"
cp hyperkube "$OUTPUT_DIR/masters/apiserver"
popd &> /dev/null
rm -rf "$tmp_dir"
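Run with the version from `cluster.config`; the resulting layout is what the copy steps in the README expect:
```sh
./scripts/download_kubernetes_binaries v1.15.0 ./kubernetes
ls ./kubernetes/workers
# kubectl  kubelet  proxy
ls ./kubernetes/masters
# apiserver  cloud-controller-manager  controller-manager  kubectl  kubelet  proxy  scheduler
```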

scripts/download_node_binaries (deleted)

@@ -1,2 +0,0 @@
#!/usr/bin/env bash
curl -sL https://github.com/kubernetes/kubernetes/releases/download/v1.15.0/kubernetes.tar.gz | tar zxvf -

(container runtime install script)

@@ -13,4 +13,12 @@ curl -fsSL https://download.docker.com/linux/debian/gpg | sudo apt-key add -
sudo add-apt-repository -y "deb [arch=amd64] https://download.docker.com/linux/debian $(lsb_release -cs) stable"
sudo apt-get install -y --allow-unauthenticated docker-ce docker-ce-cli containerd.io
sudo systemctl enable docker
sleep 5
sudo systemctl start docker
sudo usermod -aG docker $USER
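A quick way to confirm the runtime came up on every node after this script runs, using the same `parallel`/`vagrant ssh` pattern as the other scripts (node names come from `scripts/node_names`):
```sh
parallel vagrant ssh {} -c "systemctl is-active docker && docker --version" ::: $(./scripts/node_names)
```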

scripts/install_jq_cli (new executable file, 5 lines)

@@ -0,0 +1,5 @@
#!/usr/bin/env bash
# install jq on the node so it can parse cluster.config
sudo apt-get update -y
sudo apt-get install -y jq
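This script is meant to be pushed to the nodes with the runner script rather than executed locally; a quick check afterwards (node name illustrative):
```sh
./scripts/run_script_on_nodes install_jq_cli
vagrant ssh <node-name> -c "jq --version"
```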

scripts/node_ip_addresses (new executable file, 9 lines)

@@ -0,0 +1,9 @@
#!/usr/bin/env bash
# print "<node name> <private-network ip>" for every node matching the optional filter in $1
SCRIPTS_DIR=$(dirname "$0")
for vagrant_box in $($SCRIPTS_DIR/node_names $1)
do
  # hostname -I lists all addresses; the first is the NAT interface, the second the private_network one
  ip_address=$(vagrant ssh $vagrant_box -c "hostname -I | cut -d' ' -f2" 2>/dev/null)
  echo "$vagrant_box $ip_address"
done
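Example run (node names and addresses below are illustrative; they depend on the Vagrantfile and on what DHCP handed out):
```sh
./scripts/node_ip_addresses worker
# worker-node-1 172.28.128.3
# worker-node-2 172.28.128.4
```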

scripts/run_script_on_nodes

@@ -1,7 +1,8 @@
#!/usr/bin/env bash
SCRIPTS_DIR=$(dirname $0)
SCRIPT_NAME=$1
NODE_NAMES_FILTER=$2
NODE_NAMES="$($SCRIPTS_DIR/node_names $NODE_NAMES_FILTER)"
cat $SCRIPTS_DIR/$SCRIPT_NAME
@@ -15,8 +16,10 @@ echo ""
echo ""
# copy script over
pushd $SCRIPTS_DIR &> /dev/null
chmod u+x "$SCRIPT_NAME"
yes | ./copy_file_to_nodes "$SCRIPT_NAME" "$NODE_NAMES_FILTER" &> /dev/null
popd &> /dev/null
# remotely run the script
parallel vagrant ssh {} -c "\~/$SCRIPT_NAME" ::: $NODE_NAMES
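Usage matches the README: the first argument is a script name from `scripts/`, the optional second argument filters the target nodes by name:
```sh
# run on every node
./scripts/run_script_on_nodes install_jq_cli
# run only on nodes whose name matches "worker"
./scripts/run_script_on_nodes install_jq_cli worker
```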

scripts/show_cluster_config (new executable file, 17 lines)

@@ -0,0 +1,17 @@
#!/usr/bin/env bash
# emit the cluster configuration as JSON on stdout; the README tees it into cluster.config
KTHW_KUBERNETES_VERSION=v1.15.0
KTHW_ETCD3_VERSION=v3.3.13
cat <<EOF
{
  "kubernetes_version": "$KTHW_KUBERNETES_VERSION",
  "etcd3_version": "$KTHW_ETCD3_VERSION",
  "master": null,
  "workers": [
  ]
}
EOF
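Reading individual fields back out works with the same `jq` calls the README uses:
```sh
./scripts/show_cluster_config | tee cluster.config
jq -r ".etcd3_version" cluster.config
# v3.3.13
```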