add codes

pull/594/head
Yuri Liang 2020-08-28 18:06:10 +08:00 committed by Yuri
parent ca96371e4d
commit 257ea56edf
8 changed files with 491 additions and 0 deletions

.gitignore vendored

@@ -48,3 +48,4 @@ service-account.csr
service-account.pem
service-account-csr.json
*.swp
.terraform

@@ -0,0 +1,131 @@
# Networks
resource "google_compute_network" "vnet" {
  name                    = "${var.environment}-vnet"
  auto_create_subnetworks = false
}

resource "google_compute_subnetwork" "subnet" {
  name          = "container"
  ip_cidr_range = var.address_prefix
  region        = var.region
  network       = google_compute_network.vnet.id
}

resource "google_compute_firewall" "internal" {
  name    = "internal"
  network = google_compute_network.vnet.id

  allow {
    protocol = "icmp"
  }

  allow {
    protocol = "tcp"
  }

  allow {
    protocol = "udp"
  }

  source_ranges = var.internal_cidr
}

resource "google_compute_firewall" "external" {
  name    = "external"
  network = google_compute_network.vnet.id

  allow {
    protocol = "icmp"
  }

  allow {
    protocol = "tcp"
    ports    = ["22", "6443"]
  }

  source_ranges = var.external_cidr
}

resource "google_compute_address" "extip" {
  name   = "external-ip"
  region = var.region
}
# Compute instances (created directly; no instance template is used)
data "google_compute_image" "ubuntu" {
  family  = "ubuntu-2004-lts"
  project = "ubuntu-os-cloud"
}

resource "google_compute_instance" "controller" {
  count          = var.controller_count
  name           = "${var.environment}-controller-${count.index}"
  machine_type   = var.vm_size
  zone           = var.zone
  can_ip_forward = true

  network_interface {
    network    = google_compute_network.vnet.self_link
    subnetwork = google_compute_subnetwork.subnet.name
    network_ip = element(var.controller_ip_list, count.index)
    # We don't have enough quota for external IP addresses.
    # access_config {}
  }

  boot_disk {
    initialize_params {
      image = data.google_compute_image.ubuntu.self_link
      size  = var.boot_disk_size
      type  = var.boot_disk_type
    }
  }

  service_account {
    scopes = var.controller_scopes
  }

  # Allow resizing the VM after initial creation.
  allow_stopping_for_update = true
  description               = "Kubernetes controller node"
  tags                      = var.controller_node_tags
}
resource "google_compute_instance" "worker" {
count = var.worker_count
name = "${var.environment}-worker-${count.index}"
machine_type = var.vm_size
zone = var.zone
can_ip_forward = true
network_interface {
network = google_compute_network.vnet.self_link
subnetwork = google_compute_subnetwork.subnet.name
network_ip = element(var.worker_ip_list, count.index)
# we dont have enough quota for external ip address
# access_config {}
}
boot_disk {
initialize_params {
image = data.google_compute_image.ubuntu.self_link
size = var.boot_disk_size
type = var.boot_disk_type
}
}
metadata = {
pod-cidr = element(var.pod_address_prefix, count.index)
}
service_account {
scopes = var.worker_scopes
}
# resize VM after initial creation
allow_stopping_for_update = true
description = "kubernetes Worker Nodes"
tags = var.worker_node_tags
}
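After `terraform apply`, a quick sanity check (a sketch, assuming `gcloud` is authenticated and the default project and zone are configured) is to list the instances by the network tag set in `codes/main.tf` below:

# List the controller and worker VMs created by this module,
# showing each instance's private IP and status.
gcloud compute instances list \
  --filter="tags.items=kubernetes-the-hard-way" \
  --format="table(name, networkInterfaces[0].networkIP, status)"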

@@ -0,0 +1,7 @@
output "controller_private_ip" {
value = google_compute_instance.controller.*.network_interface.0.network_ip
}
output "worker_private_ip" {
value = google_compute_instance.worker.*.network_interface.0.network_ip
}

@@ -0,0 +1,87 @@
variable "environment" {
description = "Name of this lab"
}
variable "address_prefix" {
description = "Network CIDR"
}
variable "region" {
description = "Region of this lab"
}
variable "zone" {
description = "Zone of VM"
}
variable "internal_cidr" {
description = "CIDR Allowed internal"
}
variable "external_cidr" {
description = "CIDR Allowed external"
}
variable "vm_size" {
description = "The machine type to create."
}
variable "boot_disk_type" {
description = "The GCE disk type. Can be either pd-ssd, local-ssd, or pd-standard"
default = "pd-standard"
}
variable "boot_disk_size" {
type = number
description = "The size of the image in gigabytes"
default = 200
}
variable "controller_count" {
type = number
description = "Number of controller nodes"
default = 3
}
variable "worker_count" {
type = number
description = "Number of worker nodes"
default = 3
}
variable "controller_ip_list" {
type = list(string)
description = "list of controller ip"
}
variable "worker_ip_list" {
type = list(string)
description = "list of worker ip"
}
variable "controller_scopes" {
type = list(string)
description = "Scopes of controller Nodes"
default = ["compute-rw", "storage-ro", "service-management", "service-control", "logging-write", "monitoring"]
}
variable "worker_scopes" {
type = list(string)
description = "Scopes of Worker Nodes"
default = ["compute-rw", "storage-ro", "service-management", "service-control", "logging-write", "monitoring"]
}
variable "controller_node_tags" {
type = list(string)
description = "A list of network tags to attach to the instance."
}
variable "worker_node_tags" {
type = list(string)
description = "A list of network tags to attach to the instance."
}
variable "pod_address_prefix" {
type = list(string)
description = "Pod Address Space prefix"
}

codes/main.tf Normal file

@@ -0,0 +1,15 @@
module "kubernetes" {
source = "./kubernetes/terraform/gcp"
environment = "kubernetes"
region = "us-west1"
zone = "us-west1-b"
address_prefix = "10.240.0.0/24"
internal_cidr = ["10.240.0.0/24", "10.200.0.0/16"]
external_cidr = ["0.0.0.0/0"]
vm_size = "custom-1-8192-ext"
controller_ip_list = ["10.240.0.10", "10.240.0.11", "10.240.0.12"]
controller_node_tags = ["kubernetes-the-hard-way", "controller"]
worker_ip_list = ["10.240.0.20", "10.240.0.21", "10.240.0.22"]
worker_node_tags = ["kubernetes-the-hard-way", "worker"]
pod_address_prefix = ["10.200.0.0/24", "10.200.1.0/24", "10.200.2.0/24"]
}
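A minimal workflow for this root module (a sketch, assuming Terraform 0.12+ and Google credentials are already configured):

# Initialize the provider and module, review the plan, then build the lab.
terraform init
terraform plan -out=tfplan
terraform apply tfplan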

codes/output.tf Normal file

@@ -0,0 +1,7 @@
output "controller_nodes" {
value = module.kubernetes.controller_private_ip
}
output "worker_nodes" {
value = module.kubernetes.worker_private_ip
}
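Once applied, the node IPs can be read back from state, for example:

# Print the private IPs of the controller nodes from Terraform state.
terraform output controller_nodes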

codes/provider.tf Normal file

@@ -0,0 +1,3 @@
provider "google" {
project = "handy-cache-287800"
}
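The provider block sets only the project, so the Google provider falls back to Application Default Credentials. One way to supply them locally (an assumption about the author's setup; not shown in this commit):

# Store Application Default Credentials for the Google provider to pick up.
gcloud auth application-default login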

@@ -0,0 +1,240 @@
#!/bin/bash
########################
# Install Client Tools #
########################
# macOS only (because I love macOS)
brew install cfssl
brew cask install google-cloud-sdk
if [ ! -f /usr/local/bin/kubectl ]; then
  curl -o kubectl https://storage.googleapis.com/kubernetes-release/release/v1.18.6/bin/darwin/amd64/kubectl
  chmod +x kubectl
  sudo mv kubectl /usr/local/bin/
fi
##############################################################################################
# Provision a Certificate Authority that can be used to generate additional TLS certificates #
##############################################################################################
# Generate the CA configuration file, certificate, and private key.
cat > ca-config.json <<EOF
{
  "signing": {
    "default": {
      "expiry": "8760h"
    },
    "profiles": {
      "kubernetes": {
        "usages": ["signing", "key encipherment", "server auth", "client auth"],
        "expiry": "8760h"
      }
    }
  }
}
EOF
cat > ca-csr.json <<EOF
{
  "CN": "Kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "US",
      "L": "Portland",
      "O": "Kubernetes",
      "OU": "CA",
      "ST": "Oregon"
    }
  ]
}
EOF
cfssl gencert -initca ca-csr.json | cfssljson -bare ca
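# Optional sanity check (not in the original script; assumes openssl is on
# the PATH): confirm the CA certificate's subject and validity window before
# issuing leaf certificates against it.
openssl x509 -in ca.pem -noout -subject -dates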
# Generate client and server certificates for each Kubernetes component,
# and a client certificate for the Kubernetes admin user.
cat > admin-csr.json <<EOF
{
  "CN": "admin",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "US",
      "L": "Portland",
      "O": "system:masters",
      "OU": "Kubernetes The Hard Way",
      "ST": "Oregon"
    }
  ]
}
EOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin
# Create a certificate for each Kubernetes worker node that meets the Node Authorizer requirements.
for instance in kubernetes-worker-0 kubernetes-worker-1 kubernetes-worker-2; do
  cat > ${instance}-csr.json <<EOF
{
  "CN": "system:node:${instance}",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "US",
      "L": "Portland",
      "O": "system:nodes",
      "OU": "Kubernetes The Hard Way",
      "ST": "Oregon"
    }
  ]
}
EOF
  # Note: the Terraform above leaves access_config commented out, so the
  # instances have no external IPs and EXTERNAL_IP may come back empty here.
  EXTERNAL_IP=$(gcloud compute instances describe ${instance} --format 'value(networkInterfaces[0].accessConfigs[0].natIP)')
  INTERNAL_IP=$(gcloud compute instances describe ${instance} --format 'value(networkInterfaces[0].networkIP)')
  cfssl gencert \
    -ca=ca.pem \
    -ca-key=ca-key.pem \
    -config=ca-config.json \
    -hostname=${instance},${EXTERNAL_IP},${INTERNAL_IP} \
    -profile=kubernetes \
    ${instance}-csr.json | cfssljson -bare ${instance}
done
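# Optional check (not in the original script): the Node Authorizer expects
# CN "system:node:<nodeName>" and O "system:nodes"; print one worker cert's
# subject to confirm the CSR fields above took effect.
openssl x509 -in kubernetes-worker-0.pem -noout -subject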
# Generate the kube-controller-manager client certificate and private key.
cat > kube-controller-manager-csr.json <<EOF
{
  "CN": "system:kube-controller-manager",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "US",
      "L": "Portland",
      "O": "system:kube-controller-manager",
      "OU": "Kubernetes The Hard Way",
      "ST": "Oregon"
    }
  ]
}
EOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager
# Generate the kube-proxy client certificate and private key.
cat > kube-proxy-csr.json <<EOF
{
  "CN": "system:kube-proxy",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "US",
      "L": "Portland",
      "O": "system:node-proxier",
      "OU": "Kubernetes The Hard Way",
      "ST": "Oregon"
    }
  ]
}
EOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
# Generate the kube-scheduler client certificate and private key.
cat > kube-scheduler-csr.json <<EOF
{
  "CN": "system:kube-scheduler",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "US",
      "L": "Portland",
      "O": "system:kube-scheduler",
      "OU": "Kubernetes The Hard Way",
      "ST": "Oregon"
    }
  ]
}
EOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-scheduler-csr.json | cfssljson -bare kube-scheduler
# Generate the Kubernetes API server certificate and private key.
KUBERNETES_PUBLIC_ADDRESS=$(gcloud compute addresses describe external-ip --region $(gcloud config get-value compute/region) --format 'value(address)')
KUBERNETES_HOSTNAMES=kubernetes,kubernetes.default,kubernetes.default.svc,kubernetes.default.svc.cluster,kubernetes.default.svc.cluster.local
cat > kubernetes-csr.json <<EOF
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "US",
      "L": "Portland",
      "O": "Kubernetes",
      "OU": "Kubernetes The Hard Way",
      "ST": "Oregon"
    }
  ]
}
EOF
cfssl gencert \
  -ca=ca.pem \
  -ca-key=ca-key.pem \
  -config=ca-config.json \
  -hostname=10.32.0.1,10.240.0.10,10.240.0.11,10.240.0.12,${KUBERNETES_PUBLIC_ADDRESS},127.0.0.1,${KUBERNETES_HOSTNAMES} \
  -profile=kubernetes \
  kubernetes-csr.json | cfssljson -bare kubernetes
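# Optional check (not in the original script): verify the SAN list on the
# API server certificate includes the service IP, controller IPs, and the
# DNS names assembled above.
openssl x509 -in kubernetes.pem -noout -text | grep -A1 'Subject Alternative Name'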
# Generate the service-account certificate and private key
# (used by the controller manager to sign service account tokens).
cat > service-account-csr.json <<EOF
{
  "CN": "service-accounts",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "US",
      "L": "Portland",
      "O": "Kubernetes",
      "OU": "Kubernetes The Hard Way",
      "ST": "Oregon"
    }
  ]
}
EOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes service-account-csr.json | cfssljson -bare service-account
# Copy the appropriate certificates and private keys to each worker instance.
# Because the instances have no external IPs (access_config is commented out
# above), these copies may need gcloud's --internal-ip or --tunnel-through-iap.
for instance in kubernetes-worker-0 kubernetes-worker-1 kubernetes-worker-2; do
  gcloud compute scp ca.pem ${instance}-key.pem ${instance}.pem ${instance}:~/
done
# Copy the appropriate certificates and private keys to each controller instance.
for instance in kubernetes-controller-0 kubernetes-controller-1 kubernetes-controller-2; do
  gcloud compute scp ca.pem ca-key.pem kubernetes-key.pem kubernetes.pem \
    service-account-key.pem service-account.pem ${instance}:~/
done