VTWO-14496: configure nodes with ansible instead of bash scripts

pull/468/head
mbenabda 2019-06-27 18:53:09 +02:00
parent e505bac08a
commit de0fa7e688
26 changed files with 239 additions and 241 deletions

.gitignore vendored

@@ -1,5 +1,4 @@
 .vagrant
-kubernetes/
-etcd*/
-cluster.config
 inventory/generated
+*.retry

README.md

@@ -9,50 +9,33 @@ It should be able to get you to a working single master (insecure) kubernetes se
 # prerequisites
 - vagrant
-- the scp vagrant plugin : `vagrant plugin install vagrant-scp`
-- [the GNU parallel CLI](https://www.gnu.org/software/parallel/)
-- [jq](https://stedolan.github.io/jq/)
+- cfssl
+- cfssljson
+
+You can run the following command to check if you've missed something (don't worry, it won't install anything on your machine):
+```sh
+ansible-playbook kthw-playbook.yml -t check_local_prerequisites -l localhost
+```

 # setup
 - run `vagrant up` to start the vms. This will create a master node and 2 worker nodes on your host's network
-- run `./scripts/show_cluster_config | tee cluster.config`
-- copy the cluster configuration to the nodes:
-```sh
-./scripts/copy_file_to_nodes cluster.config
-```
-- install the jq CLI on the nodes so they can read the config
-```sh
-./scripts/run_script_on_nodes install_jq_cli
-```
-- setup a container runtime
+- set up a container runtime on the nodes
 ```sh
-./scripts/run_script_on_nodes install_container_runtime
+ansible-playbook kthw-playbook.yml -t install_container_runtime -l k8s_nodes
 ```
-- download kubernetes
+- install kubelet, kube-proxy, apiserver, scheduler and native controllers on the master nodes
 ```sh
-./scripts/download_kubernetes_binaries $(cat cluster.config | jq -r ".kubernetes_version") ./kubernetes
+ansible-playbook kthw-playbook.yml -t install_kubernetes_master_components -l masters
 ```
-- download etcd
+- install kubelet & kube-proxy on the worker nodes
 ```sh
-./scripts/download_etcd_binaries $(cat cluster.config | jq -r ".etcd3_version") ./etcd3
+ansible-playbook kthw-playbook.yml -t install_kubernetes_worker_components -l workers
 ```
-- copy kubelet & kube-proxy on the worker nodes
+- install etcd on the master nodes
 ```sh
-./scripts/copy_file_to_nodes ./kubernetes/workers worker
-./scripts/run_command_on_nodes 'sudo mv ~/workers/* /usr/bin/ && rmdir ~/workers' worker
+ansible-playbook kthw-playbook.yml -t install_etcd -l masters
 ```
-- copy etcd, kubelet, kube-proxy, apiserver, scheduler and native controllers binaries to the master nodes
-```sh
-./scripts/copy_file_to_nodes ./etcd3 master
-./scripts/run_command_on_nodes 'sudo mv ~/etcd3/* /usr/bin/ && rmdir ~/etcd3' master
-./scripts/copy_file_to_nodes ./kubernetes/masters master
-./scripts/run_command_on_nodes 'sudo mv ~/masters/* /usr/bin/ && rmdir ~/masters' master
-```
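The tag-by-tag commands above should also compose into a single run, since every task in `kthw-playbook.yml` is guarded by a `when: ... in group_names` condition, and `ansible.cfg` already points at the `inventory/` directory so no `-i` flag is needed. A minimal sketch (assuming `vagrant up` has already written `inventory/generated`):

```sh
# provision everything in one pass; group membership decides what each node gets
ansible-playbook kthw-playbook.yml

# or re-run a single slice, e.g. only the worker components
ansible-playbook kthw-playbook.yml -t install_kubernetes_worker_components -l workers
```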

Vagrantfile vendored

@@ -15,8 +15,6 @@ hosts = {
   workers: (1..2).map { |i| "worker-node-#{i}" }
 }

-generated_ansible_inventory_file="./inventory/generated"
-
 Vagrant.configure("2") do |config|
   config.vm.box = "debian/stretch64"
   config.vm.box_version = "= 9.9.1"
@@ -26,7 +24,29 @@ Vagrant.configure("2") do |config|
   # greet from every configured VM, revealing its hostname
   config.vm.provision "shell", inline: "echo Hello from \$HOSTNAME"

-  (hosts[:masters] + hosts[:workers]).each do |node_name|
+  # complete the ansible inventory with groups
+  config.trigger.before :up do |trigger|
+    inventory = File.open("./inventory/generated", "w")
+    all_hosts = []
+
+    hosts.keys.each do |group_name|
+      inventory.puts "[#{group_name}]"
+      hosts[group_name].each do |node_name|
+        inventory.puts node_name
+        all_hosts << node_name
+      end
+    end
+
+    inventory.puts "[k8s_nodes]"
+    all_hosts.each do |node_name|
+      inventory.puts node_name
+    end
+    inventory.close # close so the file is flushed before ansible reads it
+  end
+
+  # provision the vms
+  hosts.keys.each do |node_group|
+    hosts[node_group].each do |node_name|
     config.vm.define node_name do |node|
       node.vm.hostname = node_name
@@ -38,20 +58,6 @@ Vagrant.configure("2") do |config|
     end
   end
-
-  config.trigger.after :up do |trigger|
-    File.open(generated_ansible_inventory_file, "w") do |w|
-      w.puts "[masters]"
-      hosts[:masters].each { |host| w.puts host }
-      w.puts "[workers]"
-      hosts[:workers].each { |host| w.puts host }
-    end
-  end
-
-  """
-  config.trigger.after :destroy do |trigger|
-    File.delete(generated_ansible_inventory_file) if File.exist?(generated_ansible_inventory_file)
-  end
-  """
 end
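With the `before :up` trigger above, `inventory/generated` is (re)written before any box boots. Given the `workers` entries shown in `hosts` (the `masters` entries sit outside the hunk, so `master-node-1` below is a hypothetical name), the generated file would look like:

```sh
$ cat inventory/generated
[masters]
master-node-1
[workers]
worker-node-1
worker-node-2
[k8s_nodes]
master-node-1
worker-node-1
worker-node-2
```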

ansible.cfg

@@ -1,3 +1,5 @@
 [defaults]
 remote_tmp = /tmp/$USER/ansible
 inventory = inventory/
+host_key_checking = false
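Since `inventory` points at the `inventory/` directory, ansible merges every file in it: `inventory/localhost` plus the vagrant-generated `inventory/generated`. A quick way to confirm the merged view (assuming both files exist):

```sh
# show all groups and hosts as ansible resolves them from inventory/
ansible-inventory --graph

# or list which hosts a limit pattern would match
ansible k8s_nodes --list-hosts
```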

check_local_prerequisites.yml Normal file

@@ -0,0 +1,21 @@
---
- name: Prerequisites | required commands are installed and in PATH
  shell: command -v {{ item }} >/dev/null 2>&1
  ignore_errors: yes
  with_items:
    - cfssl
    - cfssljson
    - kubectl
  register: localhost_has_required_commands

- name: Prerequisites | instructions
  assert:
    that: "{{ 'failed' not in localhost_has_required_commands }}"
    fail_msg: "you need to install the following commands: {{ localhost_has_required_commands.results | selectattr('failed', 'sameas', true) | list | map(attribute='item') | list | join(',') }}"

- name: Prerequisites | set_fact
  set_fact:
    local_prerequisites_met: "{{ 'failed' not in localhost_has_required_commands }}"
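The probe relies on `command -v`, which exits non-zero when a command is missing; `ignore_errors: yes` lets the loop finish so the `assert` can report every missing binary at once. The same check can be reproduced by hand:

```sh
# exit status 0 if cfssl is in PATH, non-zero otherwise
command -v cfssl >/dev/null 2>&1 && echo "cfssl found" || echo "cfssl missing"
```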

install_container_runtime.yml Normal file

@@ -0,0 +1,49 @@
---
- name: Container runtime | Docker | Install dependencies
  become: yes
  apt:
    name:
      - apt-transport-https
      - ca-certificates
      - curl
      - gnupg2
      - software-properties-common
    state: present
    update_cache: yes

- name: Container runtime | Docker | Add Docker's apt repository signing key
  become: yes
  apt_key:
    url: https://download.docker.com/linux/debian/gpg
    state: present

- name: Container runtime | Docker | Add docker repository
  become: yes
  apt_repository:
    repo: "deb [arch=amd64] https://download.docker.com/linux/debian {{ ansible_distribution_release }} stable"
    state: present
    update_cache: yes

- name: Container runtime | Docker | Install
  become: yes
  apt:
    name:
      - docker-ce
      - docker-ce-cli
      - containerd.io
    allow_unauthenticated: yes
    state: present

- name: Container runtime | Docker | Start
  become: yes
  service:
    name: docker
    state: started
    enabled: yes

- name: "Container runtime | Docker | add user {{ ansible_user_id }} to group docker"
  become: yes
  user:
    name: "{{ ansible_user_id }}"
    append: yes
    groups: docker
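Once these tasks have run, an ad-hoc smoke test over the same group can confirm the daemon is up and the remote user is in the `docker` group (a sketch, not part of this commit; the group change only applies to new login sessions):

```sh
# list the remote user's groups, then query the docker daemon without sudo
ansible k8s_nodes -a "id -nG"
ansible k8s_nodes -a "docker version"
```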

@@ -0,0 +1,4 @@
kubernetes_version: v1.15.0
etcd3_version: v3.3.13

inventory/localhost Normal file

@@ -0,0 +1,6 @@
[localhost]
127.0.0.1

[localhost:vars]
ansible_connection=local
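`ansible_connection=local` makes tasks for this group run in-process on the control machine instead of over SSH, which is what lets the prerequisites check inspect the local workstation:

```sh
# no SSH involved: the module executes directly on the control machine
ansible localhost -m ping
```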

@@ -49,3 +49,4 @@ def main():
 if __name__ == '__main__':
     main()

kthw-playbook.yml Normal file

@@ -0,0 +1,47 @@
---
- hosts: all
  tasks:
    - name: Assert local prerequisites are met
      import_tasks: ./check_local_prerequisites.yml
      when: "'localhost' in group_names"
      tags:
        - check_local_prerequisites

    - name: Install a container runtime
      import_tasks: ./install_container_runtime.yml
      when: "'k8s_nodes' in group_names"
      tags:
        - install_container_runtime

    - name: Install kubernetes master components
      become: yes
      script: ./scripts/install_kubernetes_master_components {{ kubernetes_version }}
      args:
        creates: /tmp/.install_kubernetes_master_components
      when: "'masters' in group_names"
      tags:
        - install_kubernetes_components
        - install_kubernetes_master_components

    - name: Install kubernetes worker components
      become: yes
      script: ./scripts/install_kubernetes_worker_components {{ kubernetes_version }}
      args:
        creates: /tmp/.install_kubernetes_worker_components
      when: "'workers' in group_names"
      tags:
        - install_kubernetes_components
        - install_kubernetes_worker_components

    - name: Install etcd
      become: yes
      script: ./scripts/install_etcd {{ etcd3_version }}
      args:
        creates: /tmp/.install_etcd
      when: "'masters' in group_names"
      tags:
        - install_etcd
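Each `script:` task uses `creates:` as its idempotence marker: once a script has touched its flag file, later runs skip the task. Note the markers live under `/tmp`, so a VM reboot clears them and the scripts run again. Deleting a marker forces a reinstall; a sketch assuming a master named `master-node-1` (a hypothetical name, the masters are defined in the Vagrantfile):

```sh
# the first run installs etcd; a second run reports the task as skipped
ansible-playbook kthw-playbook.yml -t install_etcd -l masters

# drop the flag file to force a reinstall on that node
vagrant ssh master-node-1 -c "sudo rm -f /tmp/.install_etcd"
ansible-playbook kthw-playbook.yml -t install_etcd -l masters
```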

scripts/copy_file_to_nodes

@@ -1,17 +0,0 @@
#!/usr/bin/env bash
SCRIPTS_DIR=$(dirname $0)
FILE_TO_COPY=$(realpath "$1")
NODE_NAMES="$($SCRIPTS_DIR/node_names $2)"
echo "will copy $FILE_TO_COPY to nodes $(echo $NODE_NAMES | xargs)"
echo ""
echo ""
echo ""
echo ""
read -n 1 -s -r -p "Press any key to continue..."
echo ""
echo ""
parallel vagrant scp $FILE_TO_COPY {}:~/ ::: $NODE_NAMES

scripts/download_etcd_binaries

@@ -1,18 +0,0 @@
#!/usr/bin/env bash
ETCD3_RELEASE_VERSION=$1
OUTPUT_DIR=$(realpath "$2")
tmp_dir=$(mktemp -d)
pushd "$tmp_dir" &> /dev/null
curl -sL "https://github.com/etcd-io/etcd/releases/download/$ETCD3_RELEASE_VERSION/etcd-$ETCD3_RELEASE_VERSION-linux-amd64.tar.gz" | tar -zxf -
mkdir -p $OUTPUT_DIR
mv etcd-$ETCD3_RELEASE_VERSION-linux-amd64/etcd $OUTPUT_DIR/
mv etcd-$ETCD3_RELEASE_VERSION-linux-amd64/etcdctl $OUTPUT_DIR/
popd &> /dev/null
rm -rf $tmp_dir

scripts/download_kubernetes_binaries

@@ -1,30 +0,0 @@
#!/usr/bin/env bash
# v1.15.0
K8S_RELEASE_VERSION=$1
OUTPUT_DIR=$(realpath "$2")
tmp_dir=$(mktemp -d)
pushd "$tmp_dir" &> /dev/null
container_id=$(docker create gcr.io/google-containers/hyperkube:$K8S_RELEASE_VERSION)
docker cp $container_id:/hyperkube ./hyperkube
docker rm -f $container_id &> /dev/null
mkdir -p $OUTPUT_DIR/workers
cp hyperkube $OUTPUT_DIR/workers/kubelet
cp hyperkube $OUTPUT_DIR/workers/proxy
cp hyperkube $OUTPUT_DIR/workers/kubectl
mkdir -p $OUTPUT_DIR/masters
cp hyperkube $OUTPUT_DIR/masters/kubelet
cp hyperkube $OUTPUT_DIR/masters/proxy
cp hyperkube $OUTPUT_DIR/masters/kubectl
cp hyperkube $OUTPUT_DIR/masters/scheduler
cp hyperkube $OUTPUT_DIR/masters/controller-manager
cp hyperkube $OUTPUT_DIR/masters/cloud-controller-manager
cp hyperkube $OUTPUT_DIR/masters/apiserver
popd &> /dev/null
rm -rf $tmp_dir

scripts/install_container_runtime

@@ -1,24 +0,0 @@
#!/usr/bin/env bash
sudo apt-get update -y
sudo apt-get install -y \
apt-transport-https \
ca-certificates \
curl \
gnupg2 \
software-properties-common
curl -fsSL https://download.docker.com/linux/debian/gpg | sudo apt-key add -
sudo add-apt-repository -y "deb [arch=amd64] https://download.docker.com/linux/debian $(lsb_release -cs) stable"
sudo apt-get install -y --allow-unauthenticated docker-ce docker-ce-cli containerd.io
sudo systemctl enable docker
sudo systemctl start docker
sleep 5
sudo usermod -aG docker $USER

scripts/install_etcd Executable file

@@ -0,0 +1,16 @@
#!/usr/bin/env bash
ETCD3_RELEASE_VERSION=$1
pushd /tmp &> /dev/null
curl -sL "https://github.com/etcd-io/etcd/releases/download/$ETCD3_RELEASE_VERSION/etcd-$ETCD3_RELEASE_VERSION-linux-amd64.tar.gz" | tar -zxf -
mv etcd-$ETCD3_RELEASE_VERSION-linux-amd64/etcd /usr/bin/
mv etcd-$ETCD3_RELEASE_VERSION-linux-amd64/etcdctl /usr/bin/
rm -rf etcd*
touch .install_etcd
popd &> /dev/null
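The playbook runs this script with `become: yes`, so the `mv` into `/usr/bin` needs no explicit sudo. An ad-hoc check that the expected version landed on the masters (not part of the commit):

```sh
ansible masters -a "etcd --version"
```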

scripts/install_jq_cli

@@ -1,5 +0,0 @@
#!/usr/bin/env bash
sudo apt-get update -y
sudo apt-get install -y jq

scripts/install_kubernetes_master_components Executable file

@@ -0,0 +1,25 @@
#!/usr/bin/env bash
K8S_RELEASE_VERSION=$1
pushd /tmp
container_id=$(docker create gcr.io/google-containers/hyperkube:$K8S_RELEASE_VERSION)
docker cp $container_id:/hyperkube ./hyperkube
docker rm -f $container_id
chmod u+x hyperkube
cp hyperkube /usr/bin/kubelet
cp hyperkube /usr/bin/proxy
cp hyperkube /usr/bin/kubectl
cp hyperkube /usr/bin/scheduler
cp hyperkube /usr/bin/controller-manager
cp hyperkube /usr/bin/cloud-controller-manager
cp hyperkube /usr/bin/apiserver
rm hyperkube
touch .install_kubernetes_master_components
popd
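hyperkube is a multi-call binary that dispatches on the basename it is invoked as, which is why plain copies named `kubelet`, `apiserver`, and so on are sufficient. A spot check (assuming a master VM named `master-node-1`, a name this diff does not show):

```sh
vagrant ssh master-node-1 -c "kubelet --version"
```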

scripts/install_kubernetes_worker_components Executable file

@@ -0,0 +1,18 @@
#!/usr/bin/env bash
K8S_RELEASE_VERSION=$1
pushd /tmp
container_id=$(docker create gcr.io/google-containers/hyperkube:$K8S_RELEASE_VERSION)
docker cp $container_id:/hyperkube ./hyperkube
docker rm -f $container_id
chmod u+x hyperkube
cp hyperkube /usr/bin/kubelet
cp hyperkube /usr/bin/proxy
cp hyperkube /usr/bin/kubectl
rm hyperkube
touch .install_kubernetes_worker_components
popd

@@ -1,9 +0,0 @@
#!/usr/bin/env bash
SCRIPTS_DIR=$(dirname $0)
for vagrant_box in $($SCRIPTS_DIR/node_names $1)
do
ip_address=$(vagrant ssh $vagrant_box -c "hostname -I | cut -d' ' -f2" 2>/dev/null)
echo "$vagrant_box $ip_address"
done

scripts/node_names

@@ -1,3 +0,0 @@
#!/usr/bin/env bash
vagrant status --machine-readable | grep ,state, | cut -d, -f2 | sort | uniq | grep "$1"

scripts/run_command_on_nodes

@@ -1,31 +0,0 @@
#!/usr/bin/env bash
SCRIPTS_DIR=$(dirname $0)
MY_COMMAND="$1"
NODE_NAMES_FILTER="$2"
NODE_NAMES="$($SCRIPTS_DIR/node_names $NODE_NAMES_FILTER)"
echo "Will run the following command on $(echo $NODE_NAMES | xargs):"
echo "$MY_COMMAND"
echo ""
echo ""
read -n 1 -s -r -p "Press any key to continue..."
echo ""
# remotely run the script
pushd $SCRIPTS_DIR &> /dev/null
tmp_file=$(mktemp)
cat << WRAPPER_EOF > $tmp_file
#!/usr/bin/env bash
$MY_COMMAND
WRAPPER_EOF
yes | ./run_script_on_nodes "$(basename $tmp_file)" "$NODE_NAMES_FILTER" &> /dev/null
rm -rf $tmp_file
popd &> /dev/null

scripts/run_script_on_nodes

@@ -1,25 +0,0 @@
#!/usr/bin/env bash
SCRIPTS_DIR=$(dirname $0)
SCRIPT_NAME=$1
NODE_NAMES_FILTER=$2
NODE_NAMES="$($SCRIPTS_DIR/node_names $NODE_NAMES_FILTER)"
cat $SCRIPTS_DIR/$SCRIPT_NAME
echo ""
echo ""
echo ""
echo ""
read -n 1 -s -r -p "Press any key to continue..."
echo ""
echo ""
# copy script over
pushd $SCRIPTS_DIR &> /dev/null
chmod u+x "$SCRIPT_NAME"
yes | ./copy_file_to_nodes "$SCRIPT_NAME" "$NODE_NAMES_FILTER" &> /dev/null
popd &> /dev/null
# remotely run the script
parallel vagrant ssh {} -c "\~/$SCRIPT_NAME" ::: $NODE_NAMES

scripts/show_cluster_config

@@ -1,17 +0,0 @@
#!/usr/bin/env bash
KTHW_KUBERNETES_VERSION=v1.15.0
KTHW_ETCD3_VERSION=v3.3.13
cat <<EOF
{
"kubernetes_version": "$KTHW_KUBERNETES_VERSION",
"etcd3_version": "$KTHW_ETCD3_VERSION",
"master": null,
"workers": [
]
}
EOF
