Migrate from GCP to AWS with CloudFormation templates
parent
5c462220b7
commit
4a66dbd201
10
README.md
10
README.md
|
@ -1,11 +1,13 @@
|
|||
# Kubernetes The Hard Way
|
||||
# Kubernetes The Hard Way "on AWS"
|
||||
|
||||
This tutorial walks you through setting up Kubernetes the hard way. This guide is not for people looking for a fully automated command to bring up a Kubernetes cluster. If that's you then check out [Google Kubernetes Engine](https://cloud.google.com/kubernetes-engine), or the [Getting Started Guides](https://kubernetes.io/docs/setup).
|
||||
This tutorial walks you through setting up Kubernetes the hard way on AWS. Note that this repository is a fork of [kelseyhightower/kubernetes-the-hard-way](https://github.com/kelseyhightower/kubernetes-the-hard-way), tweaked to use AWS instead of GCP.
|
||||
|
||||
Kubernetes The Hard Way is optimized for learning, which means taking the long route to ensure you understand each task required to bootstrap a Kubernetes cluster.
|
||||
This guide is not for people looking for a fully automated command to bring up a Kubernetes cluster. Kubernetes The Hard Way is optimized for learning, which means taking the long route to ensure you understand each task required to bootstrap a Kubernetes cluster.
|
||||
|
||||
> The results of this tutorial should not be viewed as production ready, and may receive limited support from the community, but don't let that stop you from learning!
|
||||
|
||||
|
||||
|
||||
## Copyright
|
||||
|
||||
<a rel="license" href="http://creativecommons.org/licenses/by-nc-sa/4.0/"><img alt="Creative Commons License" style="border-width:0" src="https://i.creativecommons.org/l/by-nc-sa/4.0/88x31.png" /></a><br />This work is licensed under a <a rel="license" href="http://creativecommons.org/licenses/by-nc-sa/4.0/">Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License</a>.
|
||||
|
@ -27,7 +29,7 @@ Kubernetes The Hard Way guides you through bootstrapping a highly available Kube
|
|||
|
||||
## Labs
|
||||
|
||||
This tutorial assumes you have access to the [Google Cloud Platform](https://cloud.google.com). While GCP is used for basic infrastructure requirements the lessons learned in this tutorial can be applied to other platforms.
|
||||
This tutorial assumes you have access to [Amazon Web Services (AWS)](https://aws.amazon.com). While AWS is used for basic infrastructure requirements, the lessons learned in this tutorial can be applied to other platforms.
|
||||
|
||||
* [Prerequisites](docs/01-prerequisites.md)
|
||||
* [Installing the Client Tools](docs/02-client-tools.md)
|
||||
|
|
|
@ -0,0 +1,12 @@
|
|||
Resources:
|
||||
HardK8sEIP:
|
||||
Type: AWS::EC2::EIP
|
||||
Properties:
|
||||
Tags:
|
||||
- Key: Name
|
||||
Value: eip-kubernetes-the-hard-way
|
||||
|
||||
Outputs:
|
||||
EipAllocation:
|
||||
Value: !GetAtt HardK8sEIP.AllocationId
|
||||
Export: { Name: hard-k8s-eipalloc }
|
|
@ -0,0 +1,140 @@
|
|||
Resources:
|
||||
HardK8sMaster0:
|
||||
Type: AWS::EC2::Instance
|
||||
Properties:
|
||||
InstanceType: t3.micro
|
||||
SubnetId: !ImportValue hard-k8s-subnet
|
||||
SecurityGroupIds:
|
||||
- !ImportValue hard-k8s-sg
|
||||
ImageId:
|
||||
Fn::FindInMap: [UbuntuAMIs, !Ref "AWS::Region", "id"]
|
||||
KeyName: !Ref ParamKeyName
|
||||
PrivateIpAddress: 10.240.0.10
|
||||
# SourceDestCheck: false
|
||||
UserData:
|
||||
Fn::Base64: |-
|
||||
#cloud-config
|
||||
fqdn: master-0.k8shardway.local
|
||||
hostname: master-0
|
||||
runcmd:
|
||||
- echo "preserve_hostname: true" >> /etc/cloud/cloud.cfg
|
||||
write_files:
|
||||
- path: /etc/hosts
|
||||
permissions: '0644'
|
||||
content: |
|
||||
127.0.0.1 localhost localhost.localdomain
|
||||
# Kubernetes the Hard Way - hostnames
|
||||
10.240.0.10 master-0
|
||||
10.240.0.11 master-1
|
||||
10.240.0.12 master-2
|
||||
10.240.0.20 worker-0
|
||||
10.240.0.21 worker-1
|
||||
10.240.0.22 worker-2
|
||||
Tags: [ { "Key": "Name", "Value": "master-0" } ]
|
||||
|
||||
HardK8sMaster1:
|
||||
Type: AWS::EC2::Instance
|
||||
Properties:
|
||||
InstanceType: t3.micro
|
||||
SubnetId: !ImportValue hard-k8s-subnet
|
||||
SecurityGroupIds:
|
||||
- !ImportValue hard-k8s-sg
|
||||
ImageId:
|
||||
Fn::FindInMap: [UbuntuAMIs, !Ref "AWS::Region", "id"]
|
||||
KeyName: !Ref ParamKeyName
|
||||
PrivateIpAddress: 10.240.0.11
|
||||
# SourceDestCheck: false
|
||||
UserData:
|
||||
Fn::Base64: |-
|
||||
#cloud-config
|
||||
fqdn: master-1.k8shardway.local
|
||||
hostname: master-1
|
||||
runcmd:
|
||||
- echo "preserve_hostname: true" >> /etc/cloud/cloud.cfg
|
||||
write_files:
|
||||
- path: /etc/hosts
|
||||
permissions: '0644'
|
||||
content: |
|
||||
127.0.0.1 localhost localhost.localdomain
|
||||
# Kubernetes the Hard Way - hostnames
|
||||
10.240.0.10 master-0
|
||||
10.240.0.11 master-1
|
||||
10.240.0.12 master-2
|
||||
10.240.0.20 worker-0
|
||||
10.240.0.21 worker-1
|
||||
10.240.0.22 worker-2
|
||||
Tags: [ { "Key": "Name", "Value": "master-1" } ]
|
||||
|
||||
HardK8sMaster2:
|
||||
Type: AWS::EC2::Instance
|
||||
Properties:
|
||||
InstanceType: t3.micro
|
||||
SubnetId: !ImportValue hard-k8s-subnet
|
||||
SecurityGroupIds:
|
||||
- !ImportValue hard-k8s-sg
|
||||
ImageId:
|
||||
Fn::FindInMap: [UbuntuAMIs, !Ref "AWS::Region", "id"]
|
||||
KeyName: !Ref ParamKeyName
|
||||
PrivateIpAddress: 10.240.0.12
|
||||
# SourceDestCheck: false
|
||||
UserData:
|
||||
Fn::Base64: |-
|
||||
#cloud-config
|
||||
fqdn: master-2.k8shardway.local
|
||||
hostname: master-2
|
||||
runcmd:
|
||||
- echo "preserve_hostname: true" >> /etc/cloud/cloud.cfg
|
||||
write_files:
|
||||
- path: /etc/hosts
|
||||
permissions: '0644'
|
||||
content: |
|
||||
127.0.0.1 localhost localhost.localdomain
|
||||
# Kubernetes the Hard Way - hostnames
|
||||
10.240.0.10 master-0
|
||||
10.240.0.11 master-1
|
||||
10.240.0.12 master-2
|
||||
10.240.0.20 worker-0
|
||||
10.240.0.21 worker-1
|
||||
10.240.0.22 worker-2
|
||||
Tags: [ { "Key": "Name", "Value": "master-2" } ]
|
||||
|
||||
Parameters:
|
||||
ParamKeyName:
|
||||
Type: AWS::EC2::KeyPair::KeyName
|
||||
Default: ec2-key
|
||||
|
||||
# $ aws ec2 describe-regions --query 'Regions[].RegionName' --output text \
|
||||
# | tr "\t" "\n" | sort \
|
||||
# | xargs -I _R_ aws --region _R_ ec2 describe-images \
|
||||
# --filters Name=name,Values="ubuntu/images/hvm-ssd/ubuntu-bionic-18.04-amd64-server-20191002" \
|
||||
# --query 'Images[0].ImageId' --output
|
||||
Mappings:
|
||||
UbuntuAMIs:
|
||||
ap-northeast-1: { "id": "ami-0cd744adeca97abb1" }
|
||||
ap-northeast-2: { "id": "ami-00379ec40a3e30f87" }
|
||||
ap-northeast-3: { "id": "ami-0bd42271bb31d96d2" }
|
||||
ap-south-1: { "id": "ami-0123b531fc646552f" }
|
||||
ap-southeast-1: { "id": "ami-061eb2b23f9f8839c" }
|
||||
ap-southeast-2: { "id": "ami-00a54827eb7ffcd3c" }
|
||||
ca-central-1: { "id": "ami-0b683aae4ee93ef87" }
|
||||
eu-central-1: { "id": "ami-0cc0a36f626a4fdf5" }
|
||||
eu-north-1: { "id": "ami-1dab2163" }
|
||||
eu-west-1: { "id": "ami-02df9ea15c1778c9c" }
|
||||
eu-west-2: { "id": "ami-0be057a22c63962cb" }
|
||||
eu-west-3: { "id": "ami-087855b6c8b59a9e4" }
|
||||
sa-east-1: { "id": "ami-02c8813f1ea04d4ab" }
|
||||
us-east-1: { "id": "ami-04b9e92b5572fa0d1" }
|
||||
us-east-2: { "id": "ami-0d5d9d301c853a04a" }
|
||||
us-west-1: { "id": "ami-0dd655843c87b6930" }
|
||||
us-west-2: { "id": "ami-06d51e91cea0dac8d" }
|
||||
|
||||
Outputs:
|
||||
Master0:
|
||||
Value: !Ref HardK8sMaster0
|
||||
Export: { Name: hard-k8s-master-0 }
|
||||
Master1:
|
||||
Value: !Ref HardK8sMaster1
|
||||
Export: { Name: hard-k8s-master-1 }
|
||||
Master2:
|
||||
Value: !Ref HardK8sMaster2
|
||||
Export: { Name: hard-k8s-master-2 }
|
|
@ -0,0 +1,46 @@
|
|||
Resources:
|
||||
HardK8sVpc:
|
||||
Type: AWS::EC2::VPC
|
||||
Properties:
|
||||
CidrBlock: "10.240.0.0/16"
|
||||
EnableDnsHostnames: true
|
||||
EnableDnsSupport: true
|
||||
HardK8sSubnet:
|
||||
Type: AWS::EC2::Subnet
|
||||
Properties:
|
||||
VpcId: !Ref HardK8sVpc
|
||||
CidrBlock: "10.240.0.0/24"
|
||||
MapPublicIpOnLaunch: true
|
||||
HardK8sRtb:
|
||||
Type: AWS::EC2::RouteTable
|
||||
Properties:
|
||||
VpcId: !Ref HardK8sVpc
|
||||
HardK8sRtbAssociation:
|
||||
Type: AWS::EC2::SubnetRouteTableAssociation
|
||||
Properties:
|
||||
RouteTableId: !Ref HardK8sRtb
|
||||
SubnetId: !Ref HardK8sSubnet
|
||||
HardK8sIgw:
|
||||
Type: AWS::EC2::InternetGateway
|
||||
HardK8sGwAttach:
|
||||
Type: AWS::EC2::VPCGatewayAttachment
|
||||
Properties:
|
||||
VpcId: !Ref HardK8sVpc
|
||||
InternetGatewayId: !Ref HardK8sIgw
|
||||
HardK8sDefaultRoute:
|
||||
Type: AWS::EC2::Route
|
||||
Properties:
|
||||
DestinationCidrBlock: 0.0.0.0/0
|
||||
RouteTableId: !Ref HardK8sRtb
|
||||
GatewayId: !Ref HardK8sIgw
|
||||
|
||||
Outputs:
|
||||
VpcId:
|
||||
Value: !Ref HardK8sVpc
|
||||
Export: { Name: hard-k8s-vpc }
|
||||
SubnetId:
|
||||
Value: !Ref HardK8sSubnet
|
||||
Export: { Name: hard-k8s-subnet }
|
||||
RouteTableId:
|
||||
Value: !Ref HardK8sRtb
|
||||
Export: { Name: hard-k8s-rtb }
|
|
@ -0,0 +1,31 @@
|
|||
Resources:
|
||||
HardK8sNLB:
|
||||
Type: AWS::ElasticLoadBalancingV2::LoadBalancer
|
||||
Properties:
|
||||
Type: network
|
||||
Scheme: internet-facing
|
||||
SubnetMappings:
|
||||
- AllocationId: !ImportValue hard-k8s-eipalloc
|
||||
SubnetId: !ImportValue hard-k8s-subnet
|
||||
|
||||
HardK8sListener:
|
||||
Type: AWS::ElasticLoadBalancingV2::Listener
|
||||
Properties:
|
||||
DefaultActions:
|
||||
- TargetGroupArn: !Ref HardK8sTargetGroup
|
||||
Type: forward
|
||||
LoadBalancerArn: !Ref HardK8sNLB
|
||||
Port: 6443
|
||||
Protocol: TCP
|
||||
|
||||
HardK8sTargetGroup:
|
||||
Type: AWS::ElasticLoadBalancingV2::TargetGroup
|
||||
Properties:
|
||||
VpcId: !ImportValue hard-k8s-vpc
|
||||
Protocol: TCP
|
||||
Port: 6443
|
||||
Targets:
|
||||
- Id: !ImportValue hard-k8s-master-0
|
||||
- Id: !ImportValue hard-k8s-master-1
|
||||
- Id: !ImportValue hard-k8s-master-2
|
||||
HealthCheckPort: "80" # default is "traffic-port", which means 6443.
|
|
@ -0,0 +1,16 @@
|
|||
Parameters:
|
||||
ParamNodePort:
|
||||
Type: Number
|
||||
# ref: https://kubernetes.io/docs/concepts/services-networking/service/#nodeport
|
||||
MinValue: 30000
|
||||
MaxValue: 32767
|
||||
|
||||
Resources:
|
||||
HardK8sSmokeIngress:
|
||||
Type: AWS::EC2::SecurityGroupIngress
|
||||
Properties:
|
||||
GroupId: !ImportValue hard-k8s-sg
|
||||
CidrIp: 0.0.0.0/0
|
||||
IpProtocol: tcp
|
||||
FromPort: !Ref ParamNodePort
|
||||
ToPort: !Ref ParamNodePort
|
|
@ -0,0 +1,21 @@
|
|||
Resources:
|
||||
RouteWorker0:
|
||||
Type: AWS::EC2::Route
|
||||
Properties:
|
||||
DestinationCidrBlock: 10.200.0.0/24
|
||||
RouteTableId: !ImportValue hard-k8s-rtb
|
||||
InstanceId: !ImportValue hard-k8s-worker-0
|
||||
|
||||
RouteWorker1:
|
||||
Type: AWS::EC2::Route
|
||||
Properties:
|
||||
DestinationCidrBlock: 10.200.1.0/24
|
||||
RouteTableId: !ImportValue hard-k8s-rtb
|
||||
InstanceId: !ImportValue hard-k8s-worker-1
|
||||
|
||||
RouteWorker2:
|
||||
Type: AWS::EC2::Route
|
||||
Properties:
|
||||
DestinationCidrBlock: 10.200.2.0/24
|
||||
RouteTableId: !ImportValue hard-k8s-rtb
|
||||
InstanceId: !ImportValue hard-k8s-worker-2
|
|
@ -0,0 +1,19 @@
|
|||
Resources:
|
||||
HardK8sSg:
|
||||
Type: AWS::EC2::SecurityGroup
|
||||
Properties:
|
||||
GroupDescription: security group for Kubernetes the hard way
|
||||
VpcId: !ImportValue hard-k8s-vpc
|
||||
SecurityGroupIngress:
|
||||
# ingress internal traffic - allow all protocols/ports
|
||||
- { "CidrIp": "10.240.0.0/24", "IpProtocol": "-1" } # master/worker nodes cidr range
|
||||
- { "CidrIp": "10.200.0.0/16", "IpProtocol": "-1" } # pod cidr range
|
||||
# ingress external traffic
|
||||
- { "CidrIp": "0.0.0.0/0", "IpProtocol": "tcp", "FromPort": 6443, "ToPort": 6443 }
|
||||
- { "CidrIp": "0.0.0.0/0", "IpProtocol": "tcp", "FromPort": 22, "ToPort": 22 }
|
||||
- { "CidrIp": "0.0.0.0/0", "IpProtocol": "icmp", "FromPort": -1, "ToPort": -1 }
|
||||
|
||||
Outputs:
|
||||
SgId:
|
||||
Value: !Ref HardK8sSg
|
||||
Export: { Name: hard-k8s-sg }
|
|
@ -0,0 +1,197 @@
|
|||
Resources:
|
||||
HardK8sWorker0:
|
||||
Type: AWS::EC2::Instance
|
||||
Properties:
|
||||
InstanceType: t3.micro
|
||||
SubnetId: !ImportValue hard-k8s-subnet
|
||||
SecurityGroupIds:
|
||||
- !ImportValue hard-k8s-sg
|
||||
ImageId:
|
||||
Fn::FindInMap: [UbuntuAMIs, !Ref "AWS::Region", "id"]
|
||||
KeyName: !Ref ParamKeyName
|
||||
PrivateIpAddress: 10.240.0.20
|
||||
# SourceDestCheck: false
|
||||
UserData:
|
||||
Fn::Base64: |-
|
||||
Content-Type: multipart/mixed; boundary="//"
|
||||
MIME-Version: 1.0
|
||||
|
||||
--//
|
||||
Content-Type: text/cloud-config; charset="us-ascii"
|
||||
MIME-Version: 1.0
|
||||
Content-Transfer-Encoding: 7bit
|
||||
Content-Disposition: attachment; filename="cloud-config.txt"
|
||||
|
||||
#cloud-config
|
||||
fqdn: worker-0.k8shardway.local
|
||||
hostname: worker-0
|
||||
runcmd:
|
||||
- echo "preserve_hostname: true" >> /etc/cloud/cloud.cfg
|
||||
write_files:
|
||||
- path: /etc/hosts
|
||||
permissions: '0644'
|
||||
content: |
|
||||
127.0.0.1 localhost localhost.localdomain
|
||||
# Kubernetes the Hard Way - hostnames
|
||||
10.240.0.10 master-0
|
||||
10.240.0.11 master-1
|
||||
10.240.0.12 master-2
|
||||
10.240.0.20 worker-0
|
||||
10.240.0.21 worker-1
|
||||
10.240.0.22 worker-2
|
||||
|
||||
--//
|
||||
Content-Type: text/x-shellscript; charset="us-ascii"
|
||||
MIME-Version: 1.0
|
||||
Content-Transfer-Encoding: 7bit
|
||||
Content-Disposition: attachment; filename="userdata.txt"
|
||||
|
||||
#!/bin/bash
|
||||
echo 10.200.0.0/24 > /opt/pod_cidr.txt
|
||||
--//
|
||||
Tags: [ { "Key": "Name", "Value": "worker-0" } ]
|
||||
|
||||
HardK8sWorker1:
|
||||
Type: AWS::EC2::Instance
|
||||
Properties:
|
||||
InstanceType: t3.micro
|
||||
SubnetId: !ImportValue hard-k8s-subnet
|
||||
SecurityGroupIds:
|
||||
- !ImportValue hard-k8s-sg
|
||||
ImageId:
|
||||
Fn::FindInMap: [UbuntuAMIs, !Ref "AWS::Region", "id"]
|
||||
KeyName: !Ref ParamKeyName
|
||||
PrivateIpAddress: 10.240.0.21
|
||||
# SourceDestCheck: false
|
||||
UserData:
|
||||
Fn::Base64: |-
|
||||
Content-Type: multipart/mixed; boundary="//"
|
||||
MIME-Version: 1.0
|
||||
|
||||
--//
|
||||
Content-Type: text/cloud-config; charset="us-ascii"
|
||||
MIME-Version: 1.0
|
||||
Content-Transfer-Encoding: 7bit
|
||||
Content-Disposition: attachment; filename="cloud-config.txt"
|
||||
|
||||
#cloud-config
|
||||
fqdn: worker-1.k8shardway.local
|
||||
hostname: worker-1
|
||||
runcmd:
|
||||
- echo "preserve_hostname: true" >> /etc/cloud/cloud.cfg
|
||||
write_files:
|
||||
- path: /etc/hosts
|
||||
permissions: '0644'
|
||||
content: |
|
||||
127.0.0.1 localhost localhost.localdomain
|
||||
# Kubernetes the Hard Way - hostnames
|
||||
10.240.0.10 master-0
|
||||
10.240.0.11 master-1
|
||||
10.240.0.12 master-2
|
||||
10.240.0.20 worker-0
|
||||
10.240.0.21 worker-1
|
||||
10.240.0.22 worker-2
|
||||
|
||||
--//
|
||||
Content-Type: text/x-shellscript; charset="us-ascii"
|
||||
MIME-Version: 1.0
|
||||
Content-Transfer-Encoding: 7bit
|
||||
Content-Disposition: attachment; filename="userdata.txt"
|
||||
|
||||
#!/bin/bash
|
||||
echo 10.200.1.0/24 > /opt/pod_cidr.txt
|
||||
--//
|
||||
Tags: [ { "Key": "Name", "Value": "worker-1" } ]
|
||||
|
||||
HardK8sWorker2:
|
||||
Type: AWS::EC2::Instance
|
||||
Properties:
|
||||
InstanceType: t3.micro
|
||||
SubnetId: !ImportValue hard-k8s-subnet
|
||||
SecurityGroupIds:
|
||||
- !ImportValue hard-k8s-sg
|
||||
ImageId:
|
||||
Fn::FindInMap: [UbuntuAMIs, !Ref "AWS::Region", "id"]
|
||||
KeyName: !Ref ParamKeyName
|
||||
PrivateIpAddress: 10.240.0.22
|
||||
# SourceDestCheck: false
|
||||
UserData:
|
||||
Fn::Base64: |-
|
||||
Content-Type: multipart/mixed; boundary="//"
|
||||
MIME-Version: 1.0
|
||||
|
||||
--//
|
||||
Content-Type: text/cloud-config; charset="us-ascii"
|
||||
MIME-Version: 1.0
|
||||
Content-Transfer-Encoding: 7bit
|
||||
Content-Disposition: attachment; filename="cloud-config.txt"
|
||||
|
||||
#cloud-config
|
||||
fqdn: worker-2.k8shardway.local
|
||||
hostname: worker-2
|
||||
runcmd:
|
||||
- echo "preserve_hostname: true" >> /etc/cloud/cloud.cfg
|
||||
write_files:
|
||||
- path: /etc/hosts
|
||||
permissions: '0644'
|
||||
content: |
|
||||
127.0.0.1 localhost localhost.localdomain
|
||||
# Kubernetes the Hard Way - hostnames
|
||||
10.240.0.10 master-0
|
||||
10.240.0.11 master-1
|
||||
10.240.0.12 master-2
|
||||
10.240.0.20 worker-0
|
||||
10.240.0.21 worker-1
|
||||
10.240.0.22 worker-2
|
||||
|
||||
--//
|
||||
Content-Type: text/x-shellscript; charset="us-ascii"
|
||||
MIME-Version: 1.0
|
||||
Content-Transfer-Encoding: 7bit
|
||||
Content-Disposition: attachment; filename="userdata.txt"
|
||||
|
||||
#!/bin/bash
|
||||
echo 10.200.2.0/24 > /opt/pod_cidr.txt
|
||||
--//
|
||||
Tags: [ { "Key": "Name", "Value": "worker-2" } ]
|
||||
|
||||
Parameters:
|
||||
ParamKeyName:
|
||||
Type: AWS::EC2::KeyPair::KeyName
|
||||
Default: ec2-key
|
||||
|
||||
# $ aws ec2 describe-regions --query 'Regions[].RegionName' --output text \
|
||||
# | tr "\t" "\n" | sort \
|
||||
# | xargs -I _R_ aws --region _R_ ec2 describe-images \
|
||||
# --filters Name=name,Values="ubuntu/images/hvm-ssd/ubuntu-bionic-18.04-amd64-server-20191002" \
|
||||
# --query 'Images[0].ImageId' --output
|
||||
Mappings:
|
||||
UbuntuAMIs:
|
||||
ap-northeast-1: { "id": "ami-0cd744adeca97abb1" }
|
||||
ap-northeast-2: { "id": "ami-00379ec40a3e30f87" }
|
||||
ap-northeast-3: { "id": "ami-0bd42271bb31d96d2" }
|
||||
ap-south-1: { "id": "ami-0123b531fc646552f" }
|
||||
ap-southeast-1: { "id": "ami-061eb2b23f9f8839c" }
|
||||
ap-southeast-2: { "id": "ami-00a54827eb7ffcd3c" }
|
||||
ca-central-1: { "id": "ami-0b683aae4ee93ef87" }
|
||||
eu-central-1: { "id": "ami-0cc0a36f626a4fdf5" }
|
||||
eu-north-1: { "id": "ami-1dab2163" }
|
||||
eu-west-1: { "id": "ami-02df9ea15c1778c9c" }
|
||||
eu-west-2: { "id": "ami-0be057a22c63962cb" }
|
||||
eu-west-3: { "id": "ami-087855b6c8b59a9e4" }
|
||||
sa-east-1: { "id": "ami-02c8813f1ea04d4ab" }
|
||||
us-east-1: { "id": "ami-04b9e92b5572fa0d1" }
|
||||
us-east-2: { "id": "ami-0d5d9d301c853a04a" }
|
||||
us-west-1: { "id": "ami-0dd655843c87b6930" }
|
||||
us-west-2: { "id": "ami-06d51e91cea0dac8d" }
|
||||
|
||||
Outputs:
|
||||
Worker0:
|
||||
Value: !Ref HardK8sWorker0
|
||||
Export: { Name: hard-k8s-worker-0 }
|
||||
Worker1:
|
||||
Value: !Ref HardK8sWorker1
|
||||
Export: { Name: hard-k8s-worker-1 }
|
||||
Worker2:
|
||||
Value: !Ref HardK8sWorker2
|
||||
Export: { Name: hard-k8s-worker-2 }
|
|
@ -1,55 +1,42 @@
|
|||
# Prerequisites
|
||||
|
||||
## Google Cloud Platform
|
||||
## Amazon Web Services (AWS)
|
||||
|
||||
This tutorial leverages the [Google Cloud Platform](https://cloud.google.com/) to streamline provisioning of the compute infrastructure required to bootstrap a Kubernetes cluster from the ground up. [Sign up](https://cloud.google.com/free/) for $300 in free credits.
|
||||
This tutorial leverages [Amazon Web Services (AWS)](https://aws.amazon.com) to streamline provisioning of the compute infrastructure required to bootstrap a Kubernetes cluster from the ground up.
|
||||
|
||||
[Estimated cost](https://cloud.google.com/products/calculator/#id=55663256-c384-449c-9306-e39893e23afb) to run this tutorial: $0.23 per hour ($5.46 per day).
|
||||
> The compute resources required for this tutorial exceed the Amazon Web Services free tier.
|
||||
|
||||
> The compute resources required for this tutorial exceed the Google Cloud Platform free tier.
|
||||
|
||||
## Google Cloud Platform SDK
|
||||
## CloudFormation - Infrastructure as Code
|
||||
|
||||
### Install the Google Cloud SDK
|
||||
In this tutorial we use [CloudFormation](https://aws.amazon.com/cloudformation/), which enables you to provision AWS resources as code (YAML templates).
|
||||
|
||||
Follow the Google Cloud SDK [documentation](https://cloud.google.com/sdk/) to install and configure the `gcloud` command line utility.
|
||||
As a best practice you should consider using [Nested Stacks](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-nested-stacks.html) to combine related CloudFormation stacks. However, in this tutorial we provision AWS resources one by one via separate CloudFormation stacks for learning purposes.
|
||||
|
||||
Verify the Google Cloud SDK version is 262.0.0 or higher:
|
||||
All CloudFormation templates are in the [cloudformation directory](../cloudformation/) of this repository.
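Each of those templates is provisioned as its own stack in the labs that follow. As a rough sketch of the workflow (the `hard-k8s-network` stack name is simply the first one used later; each lab shows its exact command), you can optionally validate a template and then create a stack from it:

```
$ aws cloudformation validate-template \
    --template-body file://cloudformation/hard-k8s-network.cfn.yml

$ aws cloudformation create-stack \
    --stack-name hard-k8s-network \
    --template-body file://cloudformation/hard-k8s-network.cfn.yml

$ aws cloudformation wait stack-create-complete --stack-name hard-k8s-network
```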
|
||||
|
||||
## AWS CLI
|
||||
|
||||
### Install the AWS CLI
|
||||
|
||||
Follow the AWS documentation [Installing the AWS CLI version 1](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv1.html) to install and configure the `aws` command line utility.
|
||||
|
||||
```
|
||||
gcloud version
|
||||
$ aws --version
|
||||
```
|
||||
|
||||
### Set a Default Compute Region and Zone
|
||||
### Set a default region and credentials
|
||||
|
||||
This tutorial assumes a default compute region and zone have been configured.
|
||||
|
||||
If you are using the `gcloud` command-line tool for the first time `init` is the easiest way to do this:
|
||||
This tutorial assumes a default region and credentials have been configured. To configure the AWS CLI, follow these instructions: [Configuring the AWS CLI - AWS Command Line Interface](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-configure.html)
|
||||
|
||||
```
|
||||
gcloud init
|
||||
$ aws configure
|
||||
AWS Access Key ID [None]: AKIxxxxxxxxxxxxxMPLE
|
||||
AWS Secret Access Key [None]: wJalrXUxxxxxxxxxxxxxxxxxxxxxxxxxxxxLEKEY
|
||||
Default region name [None]: us-west-2
|
||||
Default output format [None]: json
|
||||
```
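To confirm the CLI can reach your account with those credentials, `aws sts get-caller-identity` is a quick check (the account and ARN values below are placeholders):

```
$ aws sts get-caller-identity
{
    "UserId": "AIDAXXXXXXXXXXXXXXXXX",
    "Account": "123456789012",
    "Arn": "arn:aws:iam::123456789012:user/your-iam-user"
}
```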
|
||||
|
||||
Then be sure to authorize gcloud to access the Cloud Platform with your Google user credentials:
|
||||
|
||||
```
|
||||
gcloud auth login
|
||||
```
|
||||
|
||||
Next set a default compute region and compute zone:
|
||||
|
||||
```
|
||||
gcloud config set compute/region us-west1
|
||||
```
|
||||
|
||||
Set a default compute zone:
|
||||
|
||||
```
|
||||
gcloud config set compute/zone us-west1-c
|
||||
```
|
||||
|
||||
> Use the `gcloud compute zones list` command to view additional regions and zones.
|
||||
|
||||
## Running Commands in Parallel with tmux
|
||||
|
||||
[tmux](https://github.com/tmux/tmux/wiki) can be used to run commands on multiple compute instances at the same time. Labs in this tutorial may require running the same commands across multiple compute instances; in those cases, consider using tmux and splitting a window into multiple panes with synchronize-panes enabled to speed up the provisioning process.
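A minimal sketch of that workflow, assuming default tmux key bindings:

```
# start a session and split it into one pane per instance
$ tmux new-session -s kubernetes-the-hard-way
# split panes with Ctrl-b " (top/bottom) or Ctrl-b % (left/right),
# then ssh to a different node in each pane.
# mirror keystrokes to every pane:
#   Ctrl-b : setw synchronize-panes on
# turn mirroring off again with:
#   Ctrl-b : setw synchronize-panes off
```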
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
# Installing the Client Tools
|
||||
|
||||
In this lab you will install the command line utilities required to complete this tutorial: [cfssl](https://github.com/cloudflare/cfssl), [cfssljson](https://github.com/cloudflare/cfssl), and [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl).
|
||||
In this lab you will install the command line utilities required to complete this tutorial: [cfssl, cfssljson](https://github.com/cloudflare/cfssl), and [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl).
|
||||
|
||||
|
||||
## Install CFSSL
|
||||
|
@ -12,38 +12,38 @@ Download and install `cfssl` and `cfssljson`:
|
|||
### OS X
|
||||
|
||||
```
|
||||
curl -o cfssl https://storage.googleapis.com/kubernetes-the-hard-way/cfssl/darwin/cfssl
|
||||
curl -o cfssljson https://storage.googleapis.com/kubernetes-the-hard-way/cfssl/darwin/cfssljson
|
||||
$ curl -o cfssl https://storage.googleapis.com/kubernetes-the-hard-way/cfssl/darwin/cfssl
|
||||
$ curl -o cfssljson https://storage.googleapis.com/kubernetes-the-hard-way/cfssl/darwin/cfssljson
|
||||
```
|
||||
|
||||
```
|
||||
chmod +x cfssl cfssljson
|
||||
$ chmod +x cfssl cfssljson
|
||||
```
|
||||
|
||||
```
|
||||
sudo mv cfssl cfssljson /usr/local/bin/
|
||||
$ sudo mv cfssl cfssljson /usr/local/bin/
|
||||
```
|
||||
|
||||
Some OS X users may experience problems using the pre-built binaries in which case [Homebrew](https://brew.sh) might be a better option:
|
||||
|
||||
```
|
||||
brew install cfssl
|
||||
$ brew install cfssl
|
||||
```
|
||||
|
||||
### Linux
|
||||
|
||||
```
|
||||
wget -q --show-progress --https-only --timestamping \
|
||||
$ wget -q --show-progress --https-only --timestamping \
|
||||
https://storage.googleapis.com/kubernetes-the-hard-way/cfssl/linux/cfssl \
|
||||
https://storage.googleapis.com/kubernetes-the-hard-way/cfssl/linux/cfssljson
|
||||
```
|
||||
|
||||
```
|
||||
chmod +x cfssl cfssljson
|
||||
$ chmod +x cfssl cfssljson
|
||||
```
|
||||
|
||||
```
|
||||
sudo mv cfssl cfssljson /usr/local/bin/
|
||||
$ sudo mv cfssl cfssljson /usr/local/bin/
|
||||
```
|
||||
|
||||
### Verification
|
||||
|
@ -51,21 +51,14 @@ sudo mv cfssl cfssljson /usr/local/bin/
|
|||
Verify `cfssl` and `cfssljson` version 1.3.4 or higher is installed:
|
||||
|
||||
```
|
||||
cfssl version
|
||||
```
|
||||
|
||||
> output
|
||||
|
||||
```
|
||||
$ cfssl version
|
||||
Version: 1.3.4
|
||||
Revision: dev
|
||||
Runtime: go1.13
|
||||
```
|
||||
|
||||
```
|
||||
cfssljson --version
|
||||
```
|
||||
```
|
||||
$ cfssljson --version
|
||||
Version: 1.3.4
|
||||
Revision: dev
|
||||
Runtime: go1.13
|
||||
|
@ -78,29 +71,29 @@ The `kubectl` command line utility is used to interact with the Kubernetes API S
|
|||
### OS X
|
||||
|
||||
```
|
||||
curl -o kubectl https://storage.googleapis.com/kubernetes-release/release/v1.15.3/bin/darwin/amd64/kubectl
|
||||
$ curl -o kubectl https://storage.googleapis.com/kubernetes-release/release/v1.15.3/bin/darwin/amd64/kubectl
|
||||
```
|
||||
|
||||
```
|
||||
chmod +x kubectl
|
||||
$ chmod +x kubectl
|
||||
```
|
||||
|
||||
```
|
||||
sudo mv kubectl /usr/local/bin/
|
||||
$ sudo mv kubectl /usr/local/bin/
|
||||
```
|
||||
|
||||
### Linux
|
||||
|
||||
```
|
||||
wget https://storage.googleapis.com/kubernetes-release/release/v1.15.3/bin/linux/amd64/kubectl
|
||||
$ wget https://storage.googleapis.com/kubernetes-release/release/v1.15.3/bin/linux/amd64/kubectl
|
||||
```
|
||||
|
||||
```
|
||||
chmod +x kubectl
|
||||
$ chmod +x kubectl
|
||||
```
|
||||
|
||||
```
|
||||
sudo mv kubectl /usr/local/bin/
|
||||
$ sudo mv kubectl /usr/local/bin/
|
||||
```
|
||||
|
||||
### Verification
|
||||
|
@ -108,13 +101,8 @@ sudo mv kubectl /usr/local/bin/
|
|||
Verify `kubectl` version 1.15.3 or higher is installed:
|
||||
|
||||
```
|
||||
kubectl version --client
|
||||
$ kubectl version --client
|
||||
Client Version: version.Info{Major:"1", Minor:"17", GitVersion:"v1.17.0", GitCommit:"70132b0f130acc0bed193d9ba59dd186f0e634cf", GitTreeState:"clean", BuildDate:"2019-12-13T11:51:44Z", GoVersion:"go1.13.4", Compiler:"gc", Platform:"darwin/amd64"}
|
||||
```
|
||||
|
||||
> output
|
||||
|
||||
```
|
||||
Client Version: version.Info{Major:"1", Minor:"15", GitVersion:"v1.15.3", GitCommit:"2d3c76f9091b6bec110a5e63777c332469e0cba2", GitTreeState:"clean", BuildDate:"2019-08-19T11:13:54Z", GoVersion:"go1.12.9", Compiler:"gc", Platform:"linux/amd64"}
|
||||
```
|
||||
|
||||
Next: [Provisioning Compute Resources](03-compute-resources.md)
|
||||
Next: [Provisioning Compute Resources](03-compute-resources.md)
|
|
@ -1,8 +1,8 @@
|
|||
# Provisioning Compute Resources
|
||||
|
||||
Kubernetes requires a set of machines to host the Kubernetes control plane and the worker nodes where containers are ultimately run. In this lab you will provision the compute resources required for running a secure and highly available Kubernetes cluster across a single [compute zone](https://cloud.google.com/compute/docs/regions-zones/regions-zones).
|
||||
Kubernetes requires a set of machines to host the Kubernetes control plane and the worker nodes where containers are ultimately run. In this lab you will provision the compute resources required for running a secure and highly available Kubernetes cluster across a single [availability zone](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html) on AWS.
|
||||
|
||||
> Ensure a default compute zone and region have been set as described in the [Prerequisites](01-prerequisites.md#set-a-default-compute-region-and-zone) lab.
|
||||
> Ensure a default region has been set as described in the [Prerequisites](01-prerequisites.md) lab.
|
||||
|
||||
## Networking
|
||||
|
||||
|
@ -10,220 +10,367 @@ The Kubernetes [networking model](https://kubernetes.io/docs/concepts/cluster-ad
|
|||
|
||||
> Setting up network policies is out of scope for this tutorial.
|
||||
|
||||
### Virtual Private Cloud Network
|
||||
### Amazon Virtual Private Cloud (VPC)
|
||||
|
||||
In this section a dedicated [Virtual Private Cloud](https://cloud.google.com/compute/docs/networks-and-firewalls#networks) (VPC) network will be setup to host the Kubernetes cluster.
|
||||
In this section a dedicated [Amazon Virtual Private Cloud](https://aws.amazon.com/vpc/) (VPC) network will be set up to host the Kubernetes cluster. The VPC should contain a public [subnet](https://docs.aws.amazon.com/vpc/latest/userguide//VPC_Subnets.html), routing rules, and security groups.
|
||||
|
||||
Create the `kubernetes-the-hard-way` custom VPC network:
|
||||
Here's a CloudFormation template that defines network resources:
|
||||
|
||||
```
|
||||
gcloud compute networks create kubernetes-the-hard-way --subnet-mode custom
|
||||
Reference: [cloudformation/hard-k8s-network.cfn.yml](../cloudformation/hard-k8s-network.cfn.yml)
|
||||
```yaml
|
||||
Resources:
|
||||
HardK8sVpc:
|
||||
Type: AWS::EC2::VPC
|
||||
Properties:
|
||||
CidrBlock: "10.240.0.0/16"
|
||||
EnableDnsHostnames: true
|
||||
EnableDnsSupport: true
|
||||
HardK8sSubnet:
|
||||
Type: AWS::EC2::Subnet
|
||||
Properties:
|
||||
VpcId: !Ref HardK8sVpc
|
||||
CidrBlock: "10.240.0.0/24"
|
||||
MapPublicIpOnLaunch: true
|
||||
# ...
|
||||
```
|
||||
|
||||
A [subnet](https://cloud.google.com/compute/docs/vpc/#vpc_networks_and_subnets) must be provisioned with an IP address range large enough to assign a private IP address to each node in the Kubernetes cluster.
|
||||
Please note that the subnet `CidrBlock` must be provisioned with an IP address range large enough to assign a private IP address to each node in the Kubernetes cluster.
|
||||
|
||||
Create the `kubernetes` subnet in the `kubernetes-the-hard-way` VPC network:
|
||||
> The `10.240.0.0/24` IP address range can host up to 254 EC2 instances.
|
||||
|
||||
Now create the network resources via the AWS CLI:
|
||||
|
||||
```
|
||||
gcloud compute networks subnets create kubernetes \
|
||||
--network kubernetes-the-hard-way \
|
||||
--range 10.240.0.0/24
|
||||
$ aws cloudformation create-stack \
|
||||
--stack-name hard-k8s-network \
|
||||
--template-body file://cloudformation/hard-k8s-network.cfn.yml
|
||||
```
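The stacks created later in this lab import this stack's exported values (`hard-k8s-vpc`, `hard-k8s-subnet`, and `hard-k8s-rtb`), so it can help to wait for creation to finish and confirm the exports exist. A rough sketch:

```
$ aws cloudformation wait stack-create-complete --stack-name hard-k8s-network

$ aws cloudformation list-exports \
    --query 'Exports[?starts_with(Name, `hard-k8s`)].[Name,Value]' --output table
```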
|
||||
|
||||
> The `10.240.0.0/24` IP address range can host up to 254 compute instances.
|
||||
|
||||
### Firewall Rules
|
||||
### Security Groups
|
||||
|
||||
Create a firewall rule that allows internal communication across all protocols:
|
||||
Create a security group that meets the following requirements:
|
||||
|
||||
```
|
||||
gcloud compute firewall-rules create kubernetes-the-hard-way-allow-internal \
|
||||
--allow tcp,udp,icmp \
|
||||
--network kubernetes-the-hard-way \
|
||||
--source-ranges 10.240.0.0/24,10.200.0.0/16
|
||||
* allows all internal traffic from the VPC CIDR range (as defined above, `10.240.0.0/24`)
|
||||
* allows all internal traffic from the Pod CIDR range (which can be chosen arbitrarily; let's say `10.200.0.0/16`)
|
||||
* allows external ingress TCP traffic on ports 22 and 6443 from anywhere (`0.0.0.0/0`)
|
||||
* allows external ingress ICMP traffic from anywhere (`0.0.0.0/0`)
|
||||
* external egress traffic is allowed implicitly, so we don't need to define it.
|
||||
|
||||
Here's a CloudFormation template to create a security group that meets the requirements above.
|
||||
|
||||
Reference: [cloudformation/hard-k8s-security-groups.cfn.yml](../cloudformation/hard-k8s-security-groups.cfn.yml)
|
||||
```yaml
|
||||
Resources:
|
||||
HardK8sSg:
|
||||
Type: AWS::EC2::SecurityGroup
|
||||
Properties:
|
||||
GroupDescription: security group for Kubernetes the hard way
|
||||
VpcId: !ImportValue hard-k8s-vpc
|
||||
SecurityGroupIngress:
|
||||
# ingress internal traffic - allow all protocols/ports
|
||||
- { "CidrIp": "10.240.0.0/24", "IpProtocol": "-1" } # master/worker nodes cidr range
|
||||
- { "CidrIp": "10.200.0.0/16", "IpProtocol": "-1" } # pod cidr range
|
||||
# ingress external traffic
|
||||
- { "CidrIp": "0.0.0.0/0", "IpProtocol": "tcp", "FromPort": 6443, "ToPort": 6443 }
|
||||
- { "CidrIp": "0.0.0.0/0", "IpProtocol": "tcp", "FromPort": 22, "ToPort": 22 }
|
||||
- { "CidrIp": "0.0.0.0/0", "IpProtocol": "icmp", "FromPort": -1, "ToPort": -1 }
|
||||
# ...
|
||||
```
|
||||
|
||||
Create a firewall rule that allows external SSH, ICMP, and HTTPS:
|
||||
This security group will be used for both master and worker nodes. It allows all internal traffic from `10.240.0.0/24` (the subnet CIDR range we created above) and `10.200.0.0/16` (the Pod CIDR range).
|
||||
|
||||
|
||||
Then create a CloudFormation stack to provision the security group.
|
||||
|
||||
```
|
||||
gcloud compute firewall-rules create kubernetes-the-hard-way-allow-external \
|
||||
--allow tcp:22,tcp:6443,icmp \
|
||||
--network kubernetes-the-hard-way \
|
||||
--source-ranges 0.0.0.0/0
|
||||
$ aws cloudformation create-stack \
|
||||
--stack-name hard-k8s-security-groups \
|
||||
--template-body file://cloudformation/hard-k8s-security-groups.cfn.yml
|
||||
```
|
||||
|
||||
> An [external load balancer](https://cloud.google.com/compute/docs/load-balancing/network/) will be used to expose the Kubernetes API Servers to remote clients.
|
||||
|
||||
List the firewall rules in the `kubernetes-the-hard-way` VPC network:
|
||||
List rules in the created security group:
|
||||
|
||||
```
|
||||
gcloud compute firewall-rules list --filter="network:kubernetes-the-hard-way"
|
||||
```
|
||||
$ aws ec2 describe-security-groups \
|
||||
--filters 'Name=description,Values="security group for Kubernetes the hard way"' \
|
||||
--query 'SecurityGroups[0].IpPermissions'
|
||||
|
||||
> output
|
||||
|
||||
```
|
||||
NAME NETWORK DIRECTION PRIORITY ALLOW DENY
|
||||
kubernetes-the-hard-way-allow-external kubernetes-the-hard-way INGRESS 1000 tcp:22,tcp:6443,icmp
|
||||
kubernetes-the-hard-way-allow-internal kubernetes-the-hard-way INGRESS 1000 tcp,udp,icmp
|
||||
[
|
||||
{
|
||||
"IpProtocol": "-1",
|
||||
"IpRanges": [ { "CidrIp": "10.240.0.0/24" }, { "CidrIp": "10.200.0.0/16" } ],...
|
||||
},
|
||||
{
|
||||
"IpProtocol": "tcp",
|
||||
"IpRanges": [ { "CidrIp": "0.0.0.0/0" } ],
|
||||
"FromPort": 6443, "ToPort": 6443,...
|
||||
},
|
||||
{
|
||||
"IpProtocol": "tcp",
|
||||
"IpRanges": [ { "CidrIp": "0.0.0.0/0" } ],
|
||||
"FromPort": 22, "ToPort": 22,...
|
||||
},
|
||||
{
|
||||
"IpProtocol": "icmp",
|
||||
"IpRanges": [ { "CidrIp": "0.0.0.0/0" } ],
|
||||
"FromPort": -1, "ToPort": -1,...
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
### Kubernetes Public IP Address
|
||||
|
||||
Allocate a static IP address that will be attached to the external load balancer fronting the Kubernetes API Servers:
|
||||
Using [Elastic IP Addresses (EIP)](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html), you can allocate a static IP address that will be attached to the [Network Load Balancer (NLB)](https://docs.aws.amazon.com/elasticloadbalancing/latest/network/introduction.html) fronting the Kubernetes API Servers.
|
||||
|
||||
```
|
||||
gcloud compute addresses create kubernetes-the-hard-way \
|
||||
--region $(gcloud config get-value compute/region)
|
||||
Let's create an EIP, which we'll use for the NLB later.
|
||||
|
||||
Reference: [cloudformation/hard-k8s-eip.cfn.yml](../cloudformation/hard-k8s-eip.cfn.yml)
|
||||
```yaml
|
||||
Resources:
|
||||
HardK8sEIP:
|
||||
Type: AWS::EC2::EIP
|
||||
Properties:
|
||||
Tags:
|
||||
- Key: Name
|
||||
Value: eip-kubernetes-the-hard-way
|
||||
|
||||
Outputs:
|
||||
EipAllocation:
|
||||
Value: !GetAtt HardK8sEIP.AllocationId
|
||||
Export: { Name: hard-k8s-eipalloc }
|
||||
```
|
||||
|
||||
Verify the `kubernetes-the-hard-way` static IP address was created in your default compute region:
|
||||
Allocate the Elastic IP address via CloudFormation:
|
||||
|
||||
```
|
||||
gcloud compute addresses list --filter="name=('kubernetes-the-hard-way')"
|
||||
$ aws cloudformation create-stack \
|
||||
--stack-name hard-k8s-eip \
|
||||
--template-body file://cloudformation/hard-k8s-eip.cfn.yml
|
||||
```
|
||||
|
||||
> output
|
||||
The EIP is tagged with the name `eip-kubernetes-the-hard-way` so that we can retrieve it easily:
|
||||
|
||||
```
|
||||
NAME REGION ADDRESS STATUS
|
||||
kubernetes-the-hard-way us-west1 XX.XXX.XXX.XX RESERVED
|
||||
$ aws ec2 describe-addresses --filters "Name=tag:Name,Values=eip-kubernetes-the-hard-way"
|
||||
{
|
||||
"Addresses": [
|
||||
{
|
||||
"PublicIp": "x.xxx.xx.xx",
|
||||
"AllocationId": "eipalloc-xxxxxxxxxxxxxxxxx",
|
||||
"Domain": "vpc",
|
||||
"PublicIpv4Pool": "amazon",
|
||||
"Tags": [
|
||||
{ "Key": "Name", "Value": "eip-kubernetes-the-hard-way" },...
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
## Compute Instances
|
||||
## EC2 instances
|
||||
|
||||
The compute instances in this lab will be provisioned using [Ubuntu Server](https://www.ubuntu.com/server) 18.04, which has good support for the [containerd container runtime](https://github.com/containerd/containerd). Each compute instance will be provisioned with a fixed private IP address to simplify the Kubernetes bootstrapping process.
|
||||
[Amazon EC2](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/concepts.html) instances in this lab will be provisioned using [Ubuntu Server](https://www.ubuntu.com/server) 18.04. Each EC2 instance will be provisioned with a fixed private IP address to simplify the Kubernetes bootstrapping process.
|
||||
|
||||
### Kubernetes Controllers
|
||||
You connect to EC2 instances via SSH, so make sure you've created at least one SSH key pair in your account, in the region you're working in. For more information, see [Amazon EC2 Key Pairs - Amazon Elastic Compute Cloud](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html).
|
||||
|
||||
Create three compute instances which will host the Kubernetes control plane:
|
||||
### Kubernetes Master nodes (Control Plane)
|
||||
|
||||
```
|
||||
for i in 0 1 2; do
|
||||
gcloud compute instances create controller-${i} \
|
||||
--async \
|
||||
--boot-disk-size 200GB \
|
||||
--can-ip-forward \
|
||||
--image-family ubuntu-1804-lts \
|
||||
--image-project ubuntu-os-cloud \
|
||||
--machine-type n1-standard-1 \
|
||||
--private-network-ip 10.240.0.1${i} \
|
||||
--scopes compute-rw,storage-ro,service-management,service-control,logging-write,monitoring \
|
||||
--subnet kubernetes \
|
||||
--tags kubernetes-the-hard-way,controller
|
||||
done
|
||||
Create three EC2 instances which will host the Kubernetes control plane:
|
||||
|
||||
Reference: [cloudformation/hard-k8s-master-nodes.cfn.yml](../cloudformation/hard-k8s-master-nodes.cfn.yml)
|
||||
```yaml
|
||||
Resources:
|
||||
HardK8sMaster0:
|
||||
Type: AWS::EC2::Instance
|
||||
Properties:
|
||||
InstanceType: t3.micro
|
||||
SubnetId: !ImportValue hard-k8s-subnet
|
||||
SecurityGroupIds:
|
||||
- !ImportValue hard-k8s-sg
|
||||
ImageId:
|
||||
Fn::FindInMap: [UbuntuAMIs, !Ref "AWS::Region", "id"]
|
||||
KeyName: !Ref ParamKeyName
|
||||
PrivateIpAddress: 10.240.0.10
|
||||
UserData:
|
||||
Fn::Base64: |-
|
||||
#cloud-config
|
||||
fqdn: master-0.k8shardway.local
|
||||
hostname: master-0
|
||||
runcmd:
|
||||
- echo "preserve_hostname: true" >> /etc/cloud/cloud.cfg
|
||||
write_files:
|
||||
- path: /etc/hosts
|
||||
permissions: '0644'
|
||||
content: |
|
||||
127.0.0.1 localhost localhost.localdomain
|
||||
# Kubernetes the Hard Way - hostnames
|
||||
10.240.0.10 master-0
|
||||
10.240.0.11 master-1
|
||||
10.240.0.12 master-2
|
||||
10.240.0.20 worker-0
|
||||
10.240.0.21 worker-1
|
||||
10.240.0.22 worker-2
|
||||
Tags: [ { "Key": "Name", "Value": "master-0" } ]
|
||||
# ...
|
||||
|
||||
Parameters:
|
||||
ParamKeyName:
|
||||
Type: AWS::EC2::KeyPair::KeyName
|
||||
Default: ec2-key
|
||||
|
||||
# $ aws ec2 describe-regions --query 'Regions[].RegionName' --output text \
|
||||
# | tr "\t" "\n" | sort \
|
||||
# | xargs -I _R_ aws --region _R_ ec2 describe-images \
|
||||
# --filters Name=name,Values="ubuntu/images/hvm-ssd/ubuntu-bionic-18.04-amd64-server-20191002" \
|
||||
# --query 'Images[0].ImageId' --output
|
||||
Mappings:
|
||||
UbuntuAMIs:
|
||||
ap-northeast-1: { "id": "ami-0cd744adeca97abb1" }
|
||||
# ...
|
||||
|
||||
Outputs:
|
||||
Master0:
|
||||
Value: !Ref HardK8sMaster0
|
||||
Export: { Name: hard-k8s-master-0 }
|
||||
# ...
|
||||
```
|
||||
|
||||
### Kubernetes Workers
|
||||
Note that we use cloud-config definitions to set the hostname for each master node (control plane): `master-0`, `master-1`, and `master-2`.
|
||||
|
||||
Each worker instance requires a pod subnet allocation from the Kubernetes cluster CIDR range. The pod subnet allocation will be used to configure container networking in a later exercise. The `pod-cidr` instance metadata will be used to expose pod subnet allocations to compute instances at runtime.
|
||||
Create master nodes via CloudFormation. Please note that you have to replace `<your_ssh_key_name>` with your EC2 key pair name.
|
||||
|
||||
```
|
||||
$ aws ec2 describe-key-pairs --query 'KeyPairs[].KeyName'
|
||||
[
|
||||
"my-key-name-1",
|
||||
"my-key-name-2"
|
||||
]
|
||||
|
||||
$ aws cloudformation create-stack \
|
||||
--stack-name hard-k8s-master-nodes \
|
||||
--parameters ParameterKey=ParamKeyName,ParameterValue=<your_ssh_key_name> \
|
||||
--template-body file://cloudformation/hard-k8s-master-nodes.cfn.yml
|
||||
```
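The master node stack exports each instance ID (`hard-k8s-master-0`, `hard-k8s-master-1`, `hard-k8s-master-2`) for later stacks to import. If you want to confirm the stack finished and see those outputs before moving on, one option (a sketch) is:

```
$ aws cloudformation wait stack-create-complete --stack-name hard-k8s-master-nodes

$ aws cloudformation describe-stacks --stack-name hard-k8s-master-nodes \
    --query 'Stacks[0].Outputs[].[ExportName,OutputValue]' --output table
```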
|
||||
|
||||
|
||||
### Kubernetes Worker nodes (Data Plane)
|
||||
|
||||
Each worker instance requires a pod subnet allocation from the Kubernetes cluster CIDR range. The pod subnet allocation will be used to configure container networking in a later exercise. We will use [instance UserData](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-add-user-data.html) to write each instance's pod subnet allocation to `/opt/pod_cidr.txt` at runtime.
|
||||
|
||||
> The Kubernetes cluster CIDR range is defined by the Controller Manager's `--cluster-cidr` flag. In this tutorial the cluster CIDR range will be set to `10.200.0.0/16`, which supports 254 subnets.
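Once a worker has booted, that per-node allocation can be read straight back from the file written by the UserData script; for example, on `worker-0` (a quick check, run on the node itself):

```
$ cat /opt/pod_cidr.txt
10.200.0.0/24
```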
|
||||
|
||||
Create three compute instances which will host the Kubernetes worker nodes:
|
||||
Create three EC2 instances which will host the Kubernetes worker nodes:
|
||||
|
||||
```
|
||||
for i in 0 1 2; do
|
||||
gcloud compute instances create worker-${i} \
|
||||
--async \
|
||||
--boot-disk-size 200GB \
|
||||
--can-ip-forward \
|
||||
--image-family ubuntu-1804-lts \
|
||||
--image-project ubuntu-os-cloud \
|
||||
--machine-type n1-standard-1 \
|
||||
--metadata pod-cidr=10.200.${i}.0/24 \
|
||||
--private-network-ip 10.240.0.2${i} \
|
||||
--scopes compute-rw,storage-ro,service-management,service-control,logging-write,monitoring \
|
||||
--subnet kubernetes \
|
||||
--tags kubernetes-the-hard-way,worker
|
||||
done
|
||||
Reference: [cloudformation/hard-k8s-worker-nodes.cfn.yml](../cloudformation/hard-k8s-worker-nodes.cfn.yml)
|
||||
```yaml
|
||||
Resources:
|
||||
HardK8sWorker0:
|
||||
Type: AWS::EC2::Instance
|
||||
Properties:
|
||||
InstanceType: t3.micro
|
||||
SubnetId: !ImportValue hard-k8s-subnet
|
||||
SecurityGroupIds:
|
||||
- !ImportValue hard-k8s-sg
|
||||
ImageId:
|
||||
Fn::FindInMap: [UbuntuAMIs, !Ref "AWS::Region", "id"]
|
||||
KeyName: !Ref ParamKeyName
|
||||
PrivateIpAddress: 10.240.0.20
|
||||
UserData:
|
||||
Fn::Base64: |-
|
||||
Content-Type: multipart/mixed; boundary="//"
|
||||
# ...
|
||||
#cloud-config
|
||||
fqdn: worker-0.k8shardway.local
|
||||
hostname: worker-0
|
||||
runcmd:
|
||||
- echo "preserve_hostname: true" >> /etc/cloud/cloud.cfg
|
||||
write_files:
|
||||
- path: /etc/hosts
|
||||
permissions: '0644'
|
||||
content: |
|
||||
127.0.0.1 localhost localhost.localdomain
|
||||
# Kubernetes the Hard Way - hostnames
|
||||
10.240.0.10 master-0
|
||||
10.240.0.11 master-1
|
||||
10.240.0.12 master-2
|
||||
10.240.0.20 worker-0
|
||||
10.240.0.21 worker-1
|
||||
10.240.0.22 worker-2
|
||||
|
||||
--//
|
||||
# ...
|
||||
#!/bin/bash
|
||||
echo 10.200.0.0/24 > /opt/pod_cidr.txt
|
||||
--//
|
||||
# ...
|
||||
```
|
||||
|
||||
### Verification
|
||||
As with the master nodes, we use cloud-config to set each worker node's hostname: `worker-0`, `worker-1`, and `worker-2` (data plane). Using [MIME multi-part](https://www.w3.org/Protocols/rfc1341/7_2_Multipart.html) content, one of the [cloud-init UserData formats](https://cloudinit.readthedocs.io/en/latest/topics/format.html), we also define a shell script that saves the node's Pod CIDR range to `/opt/pod_cidr.txt` on the instance filesystem.
|
||||
|
||||
List the compute instances in your default compute zone:
|
||||
Create the worker nodes via CloudFormation, again replacing `<your_ssh_key>` with your EC2 key pair name.
|
||||
|
||||
```
|
||||
gcloud compute instances list
|
||||
$ aws cloudformation create-stack \
|
||||
--stack-name hard-k8s-worker-nodes \
|
||||
--parameters ParameterKey=ParamKeyName,ParameterValue=<your_ssh_key> \
|
||||
--template-body file://cloudformation/hard-k8s-worker-nodes.cfn.yml
|
||||
```
|
||||
|
||||
> output
|
||||
|
||||
### Verification of nodes
|
||||
|
||||
List the instances in your newly created VPC:
|
||||
|
||||
```
|
||||
NAME ZONE MACHINE_TYPE PREEMPTIBLE INTERNAL_IP EXTERNAL_IP STATUS
|
||||
controller-0 us-west1-c n1-standard-1 10.240.0.10 XX.XXX.XXX.XXX RUNNING
|
||||
controller-1 us-west1-c n1-standard-1 10.240.0.11 XX.XXX.X.XX RUNNING
|
||||
controller-2 us-west1-c n1-standard-1 10.240.0.12 XX.XXX.XXX.XX RUNNING
|
||||
worker-0 us-west1-c n1-standard-1 10.240.0.20 XXX.XXX.XXX.XX RUNNING
|
||||
worker-1 us-west1-c n1-standard-1 10.240.0.21 XX.XXX.XX.XXX RUNNING
|
||||
worker-2 us-west1-c n1-standard-1 10.240.0.22 XXX.XXX.XX.XX RUNNING
|
||||
$ aws cloudformation describe-stacks --stack-name hard-k8s-network --query 'Stacks[0].Outputs[].OutputValue'
|
||||
[
|
||||
"vpc-xxxxxxxxxxxxxxxxx",
|
||||
"subnet-yyyyyyyyyyyyyyyyy",
|
||||
"rtb-zzzzzzzzzzzzzzzzz"
|
||||
]
|
||||
|
||||
$ VPC_ID=$(aws cloudformation describe-stacks \
|
||||
--stack-name hard-k8s-network \
|
||||
--query 'Stacks[0].Outputs[?ExportName==`hard-k8s-vpc`].OutputValue' --output text)
|
||||
|
||||
$ aws ec2 describe-instances \
|
||||
--filters Name=vpc-id,Values=$VPC_ID \
|
||||
--query 'Reservations[].Instances[].[Tags[?Key==`Name`].Value | [0],InstanceId,Placement.AvailabilityZone,PrivateIpAddress,PublicIpAddress,State.Name]' \
|
||||
--output text | sort
|
||||
|
||||
master-0 i-xxxxxxxxxxxxxxxxx ap-northeast-1c 10.240.0.10 xx.xxx.xx.xxx running
|
||||
master-1 i-yyyyyyyyyyyyyyyyy ap-northeast-1c 10.240.0.11 xx.xxx.xxx.xxx running
|
||||
master-2 i-zzzzzzzzzzzzzzzzz ap-northeast-1c 10.240.0.12 xx.xxx.xx.xxx running
|
||||
worker-0 i-aaaaaaaaaaaaaaaaa ap-northeast-1c 10.240.0.20 x.xxx.xx.xx running
|
||||
worker-1 i-bbbbbbbbbbbbbbbbb ap-northeast-1c 10.240.0.21 xx.xxx.xx.xxx running
|
||||
worker-2 i-ccccccccccccccccc ap-northeast-1c 10.240.0.22 xx.xxx.xxx.xxx running
|
||||
```
|
||||
|
||||
## Configuring SSH Access
|
||||
|
||||
SSH will be used to configure the controller and worker instances. When connecting to compute instances for the first time SSH keys will be generated for you and stored in the project or instance metadata as described in the [connecting to instances](https://cloud.google.com/compute/docs/instances/connecting-to-instance) documentation.
|
||||
## Verifying SSH Access
|
||||
|
||||
Test SSH access to the `controller-0` compute instances:
|
||||
As mentioned above, SSH will be used to configure the master and worker instances. Since the instances were created with the `KeyName` property, you can connect to them via SSH. For more details, see the documentation: [Connecting to Your Linux Instance Using SSH - Amazon Elastic Compute Cloud](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AccessingInstancesLinux.html)
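To look up an instance's public IP address, one option is to filter on the `Name` tag set by the CloudFormation templates (shown here for `master-0`):

```
$ aws ec2 describe-instances \
    --filters "Name=tag:Name,Values=master-0" "Name=instance-state-name,Values=running" \
    --query 'Reservations[0].Instances[0].PublicIpAddress' --output text
```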
|
||||
|
||||
Let's test SSH access to the `master-0` EC2 instance via its Public IP address:
|
||||
|
||||
```
|
||||
gcloud compute ssh controller-0
|
||||
$ ssh -i ~/.ssh/your_ssh_key ubuntu@xx.xxx.xx.xxx
|
||||
# ...
|
||||
Are you sure you want to continue connecting (yes/no)? yes
|
||||
Warning: Permanently added 'xx.xxx.xx.xxx' (ECDSA) to the list of known hosts.
|
||||
Welcome to Ubuntu 18.04.3 LTS (GNU/Linux 4.15.0-1051-aws x86_64)
|
||||
# ...
|
||||
ubuntu@master-0:~$
|
||||
```
|
||||
|
||||
If this is your first time connecting to a compute instance SSH keys will be generated for you. Enter a passphrase at the prompt to continue:
|
||||
Type `exit` at the prompt to exit the `master-0` instance:
|
||||
|
||||
```
|
||||
WARNING: The public SSH key file for gcloud does not exist.
|
||||
WARNING: The private SSH key file for gcloud does not exist.
|
||||
WARNING: You do not have an SSH key for gcloud.
|
||||
WARNING: SSH keygen will be executed to generate a key.
|
||||
Generating public/private rsa key pair.
|
||||
Enter passphrase (empty for no passphrase):
|
||||
Enter same passphrase again:
|
||||
```
|
||||
ubuntu@master-0:~$ exit
|
||||
|
||||
At this point the generated SSH keys will be uploaded and stored in your project:
|
||||
|
||||
```
|
||||
Your identification has been saved in /home/$USER/.ssh/google_compute_engine.
|
||||
Your public key has been saved in /home/$USER/.ssh/google_compute_engine.pub.
|
||||
The key fingerprint is:
|
||||
SHA256:nz1i8jHmgQuGt+WscqP5SeIaSy5wyIJeL71MuV+QruE $USER@$HOSTNAME
|
||||
The key's randomart image is:
|
||||
+---[RSA 2048]----+
|
||||
| |
|
||||
| |
|
||||
| |
|
||||
| . |
|
||||
|o. oS |
|
||||
|=... .o .o o |
|
||||
|+.+ =+=.+.X o |
|
||||
|.+ ==O*B.B = . |
|
||||
| .+.=EB++ o |
|
||||
+----[SHA256]-----+
|
||||
Updating project ssh metadata...-Updated [https://www.googleapis.com/compute/v1/projects/$PROJECT_ID].
|
||||
Updating project ssh metadata...done.
|
||||
Waiting for SSH key to propagate.
|
||||
```
|
||||
|
||||
After the SSH keys have been updated you'll be logged into the `controller-0` instance:
|
||||
|
||||
```
|
||||
Welcome to Ubuntu 18.04.3 LTS (GNU/Linux 4.15.0-1042-gcp x86_64)
|
||||
...
|
||||
|
||||
Last login: Sun Sept 14 14:34:27 2019 from XX.XXX.XXX.XX
|
||||
```
|
||||
|
||||
Type `exit` at the prompt to exit the `controller-0` compute instance:
|
||||
|
||||
```
|
||||
$USER@controller-0:~$ exit
|
||||
```
|
||||
> output
|
||||
|
||||
```
|
||||
logout
|
||||
Connection to XX.XXX.XXX.XXX closed
|
||||
Connection to xx.xxx.xx.xxx closed.
|
||||
```
|
||||
|
||||
Next: [Provisioning a CA and Generating TLS Certificates](04-certificate-authority.md)
|
||||
Next: [Provisioning a CA and Generating TLS Certificates](04-certificate-authority.md)
|
|
@ -9,9 +9,7 @@ In this section you will provision a Certificate Authority that can be used to g
|
|||
Generate the CA configuration file, certificate, and private key:
|
||||
|
||||
```
|
||||
{
|
||||
|
||||
cat > ca-config.json <<EOF
|
||||
$ cat > ca-config.json <<EOF
|
||||
{
|
||||
"signing": {
|
||||
"default": {
|
||||
|
@ -27,7 +25,7 @@ cat > ca-config.json <<EOF
|
|||
}
|
||||
EOF
|
||||
|
||||
cat > ca-csr.json <<EOF
|
||||
$ cat > ca-csr.json <<EOF
|
||||
{
|
||||
"CN": "Kubernetes",
|
||||
"key": {
|
||||
|
@ -46,9 +44,7 @@ cat > ca-csr.json <<EOF
|
|||
}
|
||||
EOF
|
||||
|
||||
cfssl gencert -initca ca-csr.json | cfssljson -bare ca
|
||||
|
||||
}
|
||||
$ cfssl gencert -initca ca-csr.json | cfssljson -bare ca
|
||||
```
|
||||
|
||||
Results:
|
||||
|
@ -67,9 +63,7 @@ In this section you will generate client and server certificates for each Kubern
|
|||
Generate the `admin` client certificate and private key:
|
||||
|
||||
```
|
||||
{
|
||||
|
||||
cat > admin-csr.json <<EOF
|
||||
$ cat > admin-csr.json <<EOF
|
||||
{
|
||||
"CN": "admin",
|
||||
"key": {
|
||||
|
@ -88,14 +82,12 @@ cat > admin-csr.json <<EOF
|
|||
}
|
||||
EOF
|
||||
|
||||
cfssl gencert \
|
||||
$ cfssl gencert \
|
||||
-ca=ca.pem \
|
||||
-ca-key=ca-key.pem \
|
||||
-config=ca-config.json \
|
||||
-profile=kubernetes \
|
||||
admin-csr.json | cfssljson -bare admin
|
||||
|
||||
}
|
||||
```
|
||||
|
||||
Results:
|
||||
|
@ -112,39 +104,39 @@ Kubernetes uses a [special-purpose authorization mode](https://kubernetes.io/doc
|
|||
Generate a certificate and private key for each Kubernetes worker node:
|
||||
|
||||
```
|
||||
for instance in worker-0 worker-1 worker-2; do
|
||||
cat > ${instance}-csr.json <<EOF
|
||||
{
|
||||
"CN": "system:node:${instance}",
|
||||
"key": {
|
||||
"algo": "rsa",
|
||||
"size": 2048
|
||||
},
|
||||
"names": [
|
||||
{
|
||||
"C": "US",
|
||||
"L": "Portland",
|
||||
"O": "system:nodes",
|
||||
"OU": "Kubernetes The Hard Way",
|
||||
"ST": "Oregon"
|
||||
}
|
||||
]
|
||||
}
|
||||
EOF
|
||||
|
||||
EXTERNAL_IP=$(gcloud compute instances describe ${instance} \
|
||||
--format 'value(networkInterfaces[0].accessConfigs[0].natIP)')
|
||||
|
||||
INTERNAL_IP=$(gcloud compute instances describe ${instance} \
|
||||
--format 'value(networkInterfaces[0].networkIP)')
|
||||
|
||||
cfssl gencert \
|
||||
-ca=ca.pem \
|
||||
-ca-key=ca-key.pem \
|
||||
-config=ca-config.json \
|
||||
-hostname=${instance},${EXTERNAL_IP},${INTERNAL_IP} \
|
||||
-profile=kubernetes \
|
||||
${instance}-csr.json | cfssljson -bare ${instance}
|
||||
$ for instance in worker-0 worker-1 worker-2; do
|
||||
cat > ${instance}-csr.json <<EOF
|
||||
{
|
||||
"CN": "system:node:${instance}",
|
||||
"key": {
|
||||
"algo": "rsa",
|
||||
"size": 2048
|
||||
},
|
||||
"names": [
|
||||
{
|
||||
"C": "US",
|
||||
"L": "Portland",
|
||||
"O": "system:nodes",
|
||||
"OU": "Kubernetes The Hard Way",
|
||||
"ST": "Oregon"
|
||||
}
|
||||
]
|
||||
}
|
||||
EOF
|
||||
|
||||
EXTERNAL_IP=$(aws ec2 describe-instances --filters "Name=tag:Name,Values=${instance}" "Name=instance-state-name,Values=running" \
|
||||
--query 'Reservations[0].Instances[0].PublicIpAddress' --output text)
|
||||
|
||||
INTERNAL_IP=$(aws ec2 describe-instances --filters "Name=tag:Name,Values=${instance}" "Name=instance-state-name,Values=running" \
|
||||
--query 'Reservations[0].Instances[0].PrivateIpAddress' --output text)
|
||||
|
||||
cfssl gencert \
|
||||
-ca=ca.pem \
|
||||
-ca-key=ca-key.pem \
|
||||
-config=ca-config.json \
|
||||
-hostname=${instance},${EXTERNAL_IP},${INTERNAL_IP} \
|
||||
-profile=kubernetes \
|
||||
${instance}-csr.json | cfssljson -bare ${instance}
|
||||
done
|
||||
```
|
||||
|
||||
|
@ -164,9 +156,7 @@ worker-2.pem
|
|||
Generate the `kube-controller-manager` client certificate and private key:
|
||||
|
||||
```
|
||||
{
|
||||
|
||||
cat > kube-controller-manager-csr.json <<EOF
|
||||
$ cat > kube-controller-manager-csr.json <<EOF
|
||||
{
|
||||
"CN": "system:kube-controller-manager",
|
||||
"key": {
|
||||
|
@ -185,14 +175,12 @@ cat > kube-controller-manager-csr.json <<EOF
|
|||
}
|
||||
EOF
|
||||
|
||||
cfssl gencert \
|
||||
$ cfssl gencert \
|
||||
-ca=ca.pem \
|
||||
-ca-key=ca-key.pem \
|
||||
-config=ca-config.json \
|
||||
-profile=kubernetes \
|
||||
kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager
|
||||
|
||||
}
|
||||
```
|
||||
|
||||
Results:
|
||||
|
@ -208,9 +196,7 @@ kube-controller-manager.pem
|
|||
Generate the `kube-proxy` client certificate and private key:
|
||||
|
||||
```
|
||||
{
|
||||
|
||||
cat > kube-proxy-csr.json <<EOF
|
||||
$ cat > kube-proxy-csr.json <<EOF
|
||||
{
|
||||
"CN": "system:kube-proxy",
|
||||
"key": {
|
||||
|
@ -229,14 +215,12 @@ cat > kube-proxy-csr.json <<EOF
|
|||
}
|
||||
EOF
|
||||
|
||||
cfssl gencert \
|
||||
$ cfssl gencert \
|
||||
-ca=ca.pem \
|
||||
-ca-key=ca-key.pem \
|
||||
-config=ca-config.json \
|
||||
-profile=kubernetes \
|
||||
kube-proxy-csr.json | cfssljson -bare kube-proxy
|
||||
|
||||
}
|
||||
```
|
||||
|
||||
Results:
|
||||
|
@ -251,9 +235,7 @@ kube-proxy.pem
|
|||
Generate the `kube-scheduler` client certificate and private key:
|
||||
|
||||
```
|
||||
{
|
||||
|
||||
cat > kube-scheduler-csr.json <<EOF
|
||||
$ cat > kube-scheduler-csr.json <<EOF
|
||||
{
|
||||
"CN": "system:kube-scheduler",
|
||||
"key": {
|
||||
|
@ -272,14 +254,12 @@ cat > kube-scheduler-csr.json <<EOF
|
|||
}
|
||||
EOF
|
||||
|
||||
cfssl gencert \
|
||||
$ cfssl gencert \
|
||||
-ca=ca.pem \
|
||||
-ca-key=ca-key.pem \
|
||||
-config=ca-config.json \
|
||||
-profile=kubernetes \
|
||||
kube-scheduler-csr.json | cfssljson -bare kube-scheduler
|
||||
|
||||
}
|
||||
```
|
||||
|
||||
Results:
|
||||
|
@ -292,20 +272,27 @@ kube-scheduler.pem
|
|||
|
||||
### The Kubernetes API Server Certificate
|
||||
|
||||
The `kubernetes-the-hard-way` static IP address will be included in the list of subject alternative names for the Kubernetes API Server certificate. This will ensure the certificate can be validated by remote clients.
|
||||
In the previous section we created the [Kubernetes Public IP Address](03-compute-resources.md#kubernetes-public-ip-address) using an [Elastic IP Address (EIP)](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html). The EIP will be included in the list of subject alternative names for the Kubernetes API Server certificate. This will ensure the certificate can be validated by remote clients.
|
||||
|
||||
Generate the Kubernetes API Server certificate and private key:
|
||||
You can retrieve the EIP named `eip-kubernetes-the-hard-way` that we created in the [previous section](03-compute-resources.md) by executing the following command:
|
||||
|
||||
```
|
||||
{
|
||||
$ aws ec2 describe-addresses \
|
||||
--filters "Name=tag:Name,Values=eip-kubernetes-the-hard-way" \
|
||||
--query 'Addresses[0].PublicIp' --output text
|
||||
xxx.xxx.xxx.xx
|
||||
|
||||
KUBERNETES_PUBLIC_ADDRESS=$(gcloud compute addresses describe kubernetes-the-hard-way \
|
||||
--region $(gcloud config get-value compute/region) \
|
||||
--format 'value(address)')
|
||||
$ KUBERNETES_PUBLIC_ADDRESS=$(aws ec2 describe-addresses \
|
||||
--filters "Name=tag:Name,Values=eip-kubernetes-the-hard-way" \
|
||||
--query 'Addresses[0].PublicIp' --output text)
|
||||
```
|
||||
|
||||
KUBERNETES_HOSTNAMES=kubernetes,kubernetes.default,kubernetes.default.svc,kubernetes.default.svc.cluster,kubernetes.svc.cluster.local
|
||||
Then, using this environment variable, generate the Kubernetes API Server certificate and private key:
|
||||
|
||||
cat > kubernetes-csr.json <<EOF
|
||||
```
|
||||
$ KUBERNETES_HOSTNAMES=kubernetes,kubernetes.default,kubernetes.default.svc,kubernetes.default.svc.cluster,kubernetes.svc.cluster.local
|
||||
|
||||
$ cat > kubernetes-csr.json <<EOF
|
||||
{
|
||||
"CN": "kubernetes",
|
||||
"key": {
|
||||
|
@ -324,18 +311,16 @@ cat > kubernetes-csr.json <<EOF
|
|||
}
|
||||
EOF
|
||||
|
||||
cfssl gencert \
|
||||
$ cfssl gencert \
|
||||
-ca=ca.pem \
|
||||
-ca-key=ca-key.pem \
|
||||
-config=ca-config.json \
|
||||
-hostname=10.32.0.1,10.240.0.10,10.240.0.11,10.240.0.12,${KUBERNETES_PUBLIC_ADDRESS},127.0.0.1,${KUBERNETES_HOSTNAMES} \
|
||||
-profile=kubernetes \
|
||||
kubernetes-csr.json | cfssljson -bare kubernetes
|
||||
|
||||
}
|
||||
```
|
||||
|
||||
> The Kubernetes API server is automatically assigned the `kubernetes` internal dns name, which will be linked to the first IP address (`10.32.0.1`) from the address range (`10.32.0.0/24`) reserved for internal cluster services during the [control plane bootstrapping](08-bootstrapping-kubernetes-controllers.md#configure-the-kubernetes-api-server) lab.
|
||||
> The Kubernetes API server is automatically assigned the `kubernetes` internal dns name, which will be linked to the first IP address (`10.32.0.1`) from the address range (`10.32.0.0/24`) reserved for internal cluster services during the [control plane bootstrapping](08-bootstrapping-kubernetes-controllers.md) lab.
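If you want to double-check which subject alternative names actually ended up in the certificate, `openssl` can print them. This is an optional sanity check, not part of the original tutorial; the file name matches the `cfssljson -bare kubernetes` output above, and the output below is abbreviated:

```
$ openssl x509 -in kubernetes.pem -noout -text | grep -A 1 "Subject Alternative Name"
            X509v3 Subject Alternative Name:
                DNS:kubernetes, DNS:kubernetes.default, ..., IP Address:10.32.0.1, IP Address:10.240.0.10, ...
```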
|
||||
|
||||
Results:
|
||||
|
||||
|
@ -351,9 +336,7 @@ The Kubernetes Controller Manager leverages a key pair to generate and sign serv
|
|||
Generate the `service-account` certificate and private key:
|
||||
|
||||
```
|
||||
{
|
||||
|
||||
cat > service-account-csr.json <<EOF
|
||||
$ cat > service-account-csr.json <<EOF
|
||||
{
|
||||
"CN": "service-accounts",
|
||||
"key": {
|
||||
|
@ -372,14 +355,12 @@ cat > service-account-csr.json <<EOF
|
|||
}
|
||||
EOF
|
||||
|
||||
cfssl gencert \
|
||||
$ cfssl gencert \
|
||||
-ca=ca.pem \
|
||||
-ca-key=ca-key.pem \
|
||||
-config=ca-config.json \
|
||||
-profile=kubernetes \
|
||||
service-account-csr.json | cfssljson -bare service-account
|
||||
|
||||
}
|
||||
```
|
||||
|
||||
Results:
|
||||
|
@ -392,23 +373,38 @@ service-account.pem
|
|||
|
||||
## Distribute the Client and Server Certificates
|
||||
|
||||
Copy the appropriate certificates and private keys to each worker instance:
|
||||
Copy the appropriate certificates and private keys to each worker EC2 instance:
|
||||
|
||||
```
|
||||
for instance in worker-0 worker-1 worker-2; do
|
||||
gcloud compute scp ca.pem ${instance}-key.pem ${instance}.pem ${instance}:~/
|
||||
done
|
||||
$ aws ec2 describe-instances --filters Name=vpc-id,Values=vpc-xxxxxxxxxxxxxxxxx \
|
||||
--query 'Reservations[].Instances[].[Tags[?Key==`Name`].Value | [0],InstanceId,Placement.AvailabilityZone,PrivateIpAddress,PublicIpAddress,State.Name]' \
|
||||
--output text | sort | grep worker
|
||||
worker-0 i-aaaaaaaaaaaaaaaaa ap-northeast-1c 10.240.0.20 aa.aaa.aaa.aaa running
|
||||
worker-1 i-bbbbbbbbbbbbbbbbb ap-northeast-1c 10.240.0.21 b.bbb.b.bbb running
|
||||
worker-2 i-ccccccccccccccccc ap-northeast-1c 10.240.0.22 cc.ccc.cc.ccc running
|
||||
|
||||
$ scp -i ~/.ssh/your_ssh_key worker-0-key.pem worker-0.pem ca.pem ubuntu@aa.aaa.aaa.aaa:~/
|
||||
$ scp -i ~/.ssh/your_ssh_key worker-1-key.pem worker-1.pem ca.pem ubuntu@b.bbb.b.bbb:~/
|
||||
$ scp -i ~/.ssh/your_ssh_key worker-2-key.pem worker-2.pem ca.pem ubuntu@cc.ccc.cc.ccc:~/
|
||||
```
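If you'd rather not copy the public IP addresses by hand, a small loop works as well. This is a sketch that simply reuses the tag filters and `--query` expression from the certificate-generation step above; adjust the key path to your own:

```
$ for instance in worker-0 worker-1 worker-2; do
    EXTERNAL_IP=$(aws ec2 describe-instances \
      --filters "Name=tag:Name,Values=${instance}" "Name=instance-state-name,Values=running" \
      --query 'Reservations[0].Instances[0].PublicIpAddress' --output text)
    scp -i ~/.ssh/your_ssh_key ${instance}-key.pem ${instance}.pem ca.pem ubuntu@${EXTERNAL_IP}:~/
  done
```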
|
||||
|
||||
Copy the appropriate certificates and private keys to each controller instance:
|
||||
Copy the appropriate certificates and private keys to each master EC2 instance:
|
||||
|
||||
```
|
||||
for instance in controller-0 controller-1 controller-2; do
|
||||
gcloud compute scp ca.pem ca-key.pem kubernetes-key.pem kubernetes.pem \
|
||||
service-account-key.pem service-account.pem ${instance}:~/
|
||||
$ aws ec2 describe-instances --filters Name=vpc-id,Values=vpc-xxxxxxxxxxxxxxxxx \
|
||||
--query 'Reservations[].Instances[].[Tags[?Key==`Name`].Value | [0],InstanceId,Placement.AvailabilityZone,PrivateIpAddress,PublicIpAddress,State.Name]' \
|
||||
--output text | sort | grep master
|
||||
master-0 i-xxxxxxxxxxxxxxxxx ap-northeast-1c 10.240.0.10 xx.xxx.xxx.xxx running
|
||||
master-1 i-yyyyyyyyyyyyyyyyy ap-northeast-1c 10.240.0.11 yy.yyy.yyy.yy running
|
||||
master-2 i-zzzzzzzzzzzzzzzzz ap-northeast-1c 10.240.0.12 zz.zzz.z.zzz running
|
||||
|
||||
$ for masternode in xx.xxx.xxx.xxx yy.yyy.yyy.yy zz.zzz.z.zzz; do
|
||||
scp -i ~/.ssh/your_ssh_key \
|
||||
ca.pem ca-key.pem kubernetes-key.pem kubernetes.pem service-account-key.pem service-account.pem \
|
||||
ubuntu@${masternode}:~/
|
||||
done
|
||||
```
|
||||
|
||||
> The `kube-proxy`, `kube-controller-manager`, `kube-scheduler`, and `kubelet` client certificates will be used to generate client authentication configuration files in the next lab.
|
||||
|
||||
Next: [Generating Kubernetes Configuration Files for Authentication](05-kubernetes-configuration-files.md)
|
||||
Next: [Generating Kubernetes Configuration Files for Authentication](05-kubernetes-configuration-files.md)
|
|
@ -10,12 +10,12 @@ In this section you will generate kubeconfig files for the `controller manager`,
|
|||
|
||||
Each kubeconfig requires a Kubernetes API Server to connect to. To support high availability the IP address assigned to the external load balancer fronting the Kubernetes API Servers will be used.
|
||||
|
||||
Retrieve the `kubernetes-the-hard-way` static IP address:
|
||||
Retrieve the EIP (Elastic IP Address) named `eip-kubernetes-the-hard-way`:
|
||||
|
||||
```
|
||||
KUBERNETES_PUBLIC_ADDRESS=$(gcloud compute addresses describe kubernetes-the-hard-way \
|
||||
--region $(gcloud config get-value compute/region) \
|
||||
--format 'value(address)')
|
||||
$ KUBERNETES_PUBLIC_ADDRESS=$(aws ec2 describe-addresses \
|
||||
--filters "Name=tag:Name,Values=eip-kubernetes-the-hard-way" \
|
||||
--query 'Addresses[0].PublicIp' --output text)
|
||||
```
|
||||
|
||||
### The kubelet Kubernetes Configuration File
|
||||
|
@ -27,7 +27,7 @@ When generating kubeconfig files for Kubelets the client certificate matching th
|
|||
Generate a kubeconfig file for each worker node:
|
||||
|
||||
```
|
||||
for instance in worker-0 worker-1 worker-2; do
|
||||
$ for instance in worker-0 worker-1 worker-2; do
|
||||
kubectl config set-cluster kubernetes-the-hard-way \
|
||||
--certificate-authority=ca.pem \
|
||||
--embed-certs=true \
|
||||
|
@ -62,26 +62,24 @@ worker-2.kubeconfig
|
|||
Generate a kubeconfig file for the `kube-proxy` service:
|
||||
|
||||
```
|
||||
{
|
||||
kubectl config set-cluster kubernetes-the-hard-way \
|
||||
--certificate-authority=ca.pem \
|
||||
--embed-certs=true \
|
||||
--server=https://${KUBERNETES_PUBLIC_ADDRESS}:6443 \
|
||||
--kubeconfig=kube-proxy.kubeconfig
|
||||
$ kubectl config set-cluster kubernetes-the-hard-way \
|
||||
--certificate-authority=ca.pem \
|
||||
--embed-certs=true \
|
||||
--server=https://${KUBERNETES_PUBLIC_ADDRESS}:6443 \
|
||||
--kubeconfig=kube-proxy.kubeconfig
|
||||
|
||||
kubectl config set-credentials system:kube-proxy \
|
||||
--client-certificate=kube-proxy.pem \
|
||||
--client-key=kube-proxy-key.pem \
|
||||
--embed-certs=true \
|
||||
--kubeconfig=kube-proxy.kubeconfig
|
||||
$ kubectl config set-credentials system:kube-proxy \
|
||||
--client-certificate=kube-proxy.pem \
|
||||
--client-key=kube-proxy-key.pem \
|
||||
--embed-certs=true \
|
||||
--kubeconfig=kube-proxy.kubeconfig
|
||||
|
||||
kubectl config set-context default \
|
||||
--cluster=kubernetes-the-hard-way \
|
||||
--user=system:kube-proxy \
|
||||
--kubeconfig=kube-proxy.kubeconfig
|
||||
$ kubectl config set-context default \
|
||||
--cluster=kubernetes-the-hard-way \
|
||||
--user=system:kube-proxy \
|
||||
--kubeconfig=kube-proxy.kubeconfig
|
||||
|
||||
kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
|
||||
}
|
||||
$ kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
|
||||
```
|
||||
|
||||
Results:
|
||||
|
@ -95,26 +93,24 @@ kube-proxy.kubeconfig
|
|||
Generate a kubeconfig file for the `kube-controller-manager` service:
|
||||
|
||||
```
|
||||
{
|
||||
kubectl config set-cluster kubernetes-the-hard-way \
|
||||
--certificate-authority=ca.pem \
|
||||
--embed-certs=true \
|
||||
--server=https://127.0.0.1:6443 \
|
||||
--kubeconfig=kube-controller-manager.kubeconfig
|
||||
$ kubectl config set-cluster kubernetes-the-hard-way \
|
||||
--certificate-authority=ca.pem \
|
||||
--embed-certs=true \
|
||||
--server=https://127.0.0.1:6443 \
|
||||
--kubeconfig=kube-controller-manager.kubeconfig
|
||||
|
||||
kubectl config set-credentials system:kube-controller-manager \
|
||||
--client-certificate=kube-controller-manager.pem \
|
||||
--client-key=kube-controller-manager-key.pem \
|
||||
--embed-certs=true \
|
||||
--kubeconfig=kube-controller-manager.kubeconfig
|
||||
$ kubectl config set-credentials system:kube-controller-manager \
|
||||
--client-certificate=kube-controller-manager.pem \
|
||||
--client-key=kube-controller-manager-key.pem \
|
||||
--embed-certs=true \
|
||||
--kubeconfig=kube-controller-manager.kubeconfig
|
||||
|
||||
kubectl config set-context default \
|
||||
--cluster=kubernetes-the-hard-way \
|
||||
--user=system:kube-controller-manager \
|
||||
--kubeconfig=kube-controller-manager.kubeconfig
|
||||
$ kubectl config set-context default \
|
||||
--cluster=kubernetes-the-hard-way \
|
||||
--user=system:kube-controller-manager \
|
||||
--kubeconfig=kube-controller-manager.kubeconfig
|
||||
|
||||
kubectl config use-context default --kubeconfig=kube-controller-manager.kubeconfig
|
||||
}
|
||||
$ kubectl config use-context default --kubeconfig=kube-controller-manager.kubeconfig
|
||||
```
|
||||
|
||||
Results:
|
||||
|
@ -129,26 +125,24 @@ kube-controller-manager.kubeconfig
|
|||
Generate a kubeconfig file for the `kube-scheduler` service:
|
||||
|
||||
```
|
||||
{
|
||||
kubectl config set-cluster kubernetes-the-hard-way \
|
||||
--certificate-authority=ca.pem \
|
||||
--embed-certs=true \
|
||||
--server=https://127.0.0.1:6443 \
|
||||
--kubeconfig=kube-scheduler.kubeconfig
|
||||
$ kubectl config set-cluster kubernetes-the-hard-way \
|
||||
--certificate-authority=ca.pem \
|
||||
--embed-certs=true \
|
||||
--server=https://127.0.0.1:6443 \
|
||||
--kubeconfig=kube-scheduler.kubeconfig
|
||||
|
||||
kubectl config set-credentials system:kube-scheduler \
|
||||
--client-certificate=kube-scheduler.pem \
|
||||
--client-key=kube-scheduler-key.pem \
|
||||
--embed-certs=true \
|
||||
--kubeconfig=kube-scheduler.kubeconfig
|
||||
$ kubectl config set-credentials system:kube-scheduler \
|
||||
--client-certificate=kube-scheduler.pem \
|
||||
--client-key=kube-scheduler-key.pem \
|
||||
--embed-certs=true \
|
||||
--kubeconfig=kube-scheduler.kubeconfig
|
||||
|
||||
kubectl config set-context default \
|
||||
--cluster=kubernetes-the-hard-way \
|
||||
--user=system:kube-scheduler \
|
||||
--kubeconfig=kube-scheduler.kubeconfig
|
||||
$ kubectl config set-context default \
|
||||
--cluster=kubernetes-the-hard-way \
|
||||
--user=system:kube-scheduler \
|
||||
--kubeconfig=kube-scheduler.kubeconfig
|
||||
|
||||
kubectl config use-context default --kubeconfig=kube-scheduler.kubeconfig
|
||||
}
|
||||
$ kubectl config use-context default --kubeconfig=kube-scheduler.kubeconfig
|
||||
```
|
||||
|
||||
Results:
|
||||
|
@ -162,26 +156,24 @@ kube-scheduler.kubeconfig
|
|||
Generate a kubeconfig file for the `admin` user:
|
||||
|
||||
```
|
||||
{
|
||||
kubectl config set-cluster kubernetes-the-hard-way \
|
||||
--certificate-authority=ca.pem \
|
||||
--embed-certs=true \
|
||||
--server=https://127.0.0.1:6443 \
|
||||
--kubeconfig=admin.kubeconfig
|
||||
$ kubectl config set-cluster kubernetes-the-hard-way \
|
||||
--certificate-authority=ca.pem \
|
||||
--embed-certs=true \
|
||||
--server=https://127.0.0.1:6443 \
|
||||
--kubeconfig=admin.kubeconfig
|
||||
|
||||
kubectl config set-credentials admin \
|
||||
--client-certificate=admin.pem \
|
||||
--client-key=admin-key.pem \
|
||||
--embed-certs=true \
|
||||
--kubeconfig=admin.kubeconfig
|
||||
$ kubectl config set-credentials admin \
|
||||
--client-certificate=admin.pem \
|
||||
--client-key=admin-key.pem \
|
||||
--embed-certs=true \
|
||||
--kubeconfig=admin.kubeconfig
|
||||
|
||||
kubectl config set-context default \
|
||||
--cluster=kubernetes-the-hard-way \
|
||||
--user=admin \
|
||||
--kubeconfig=admin.kubeconfig
|
||||
$ kubectl config set-context default \
|
||||
--cluster=kubernetes-the-hard-way \
|
||||
--user=admin \
|
||||
--kubeconfig=admin.kubeconfig
|
||||
|
||||
kubectl config use-context default --kubeconfig=admin.kubeconfig
|
||||
}
|
||||
$ kubectl config use-context default --kubeconfig=admin.kubeconfig
|
||||
```
|
||||
|
||||
Results:
|
||||
|
@ -191,24 +183,38 @@ admin.kubeconfig
|
|||
```
|
||||
|
||||
|
||||
|
||||
|
||||
## Distribute the Kubernetes Configuration Files
|
||||
|
||||
Copy the appropriate `kubelet` and `kube-proxy` kubeconfig files to each worker instance:
|
||||
Copy the appropriate `kubelet` (`worker-*.kubeconfig`) and `kube-proxy` kubeconfig files to each worker instance:
|
||||
|
||||
```
|
||||
for instance in worker-0 worker-1 worker-2; do
|
||||
gcloud compute scp ${instance}.kubeconfig kube-proxy.kubeconfig ${instance}:~/
|
||||
done
|
||||
$ aws ec2 describe-instances --filters Name=vpc-id,Values=vpc-xxxxxxxxxxxxxxxxx \
|
||||
--query 'Reservations[].Instances[].[Tags[?Key==`Name`].Value | [0],InstanceId,Placement.AvailabilityZone,PrivateIpAddress,PublicIpAddress,State.Name]' \
|
||||
--output text | sort | grep worker
|
||||
worker-0 i-aaaaaaaaaaaaaaaaa ap-northeast-1c 10.240.0.20 aa.aaa.aaa.aaa running
|
||||
worker-1 i-bbbbbbbbbbbbbbbbb ap-northeast-1c 10.240.0.21 b.bbb.b.bbb running
|
||||
worker-2 i-ccccccccccccccccc ap-northeast-1c 10.240.0.22 cc.ccc.cc.ccc running
|
||||
|
||||
$ scp -i ~/.ssh/your_ssh_key worker-0.kubeconfig kube-proxy.kubeconfig ubuntu@aa.aaa.aaa.aaa:~/
|
||||
$ scp -i ~/.ssh/your_ssh_key worker-1.kubeconfig kube-proxy.kubeconfig ubuntu@b.bbb.b.bbb:~/
|
||||
$ scp -i ~/.ssh/your_ssh_key worker-2.kubeconfig kube-proxy.kubeconfig ubuntu@cc.ccc.cc.ccc:~/
|
||||
```
|
||||
|
||||
Copy the appropriate `kube-controller-manager` and `kube-scheduler` kubeconfig files to each controller instance:
|
||||
|
||||
```
|
||||
for instance in controller-0 controller-1 controller-2; do
|
||||
gcloud compute scp admin.kubeconfig kube-controller-manager.kubeconfig kube-scheduler.kubeconfig ${instance}:~/
|
||||
$ aws ec2 describe-instances --filters Name=vpc-id,Values=vpc-xxxxxxxxxxxxxxxxx \
|
||||
--query 'Reservations[].Instances[].[Tags[?Key==`Name`].Value | [0],InstanceId,Placement.AvailabilityZone,PrivateIpAddress,PublicIpAddress,State.Name]' \
|
||||
--output text | sort | grep master
|
||||
master-0 i-xxxxxxxxxxxxxxxxx ap-northeast-1c 10.240.0.10 xx.xxx.xxx.xxx running
|
||||
master-1 i-yyyyyyyyyyyyyyyyy ap-northeast-1c 10.240.0.11 yy.yyy.yyy.yy running
|
||||
master-2 i-zzzzzzzzzzzzzzzzz ap-northeast-1c 10.240.0.12 zz.zzz.z.zzz running
|
||||
|
||||
$ for masternode in xx.xxx.xxx.xxx yy.yyy.yyy.yy zz.zzz.z.zzz; do
|
||||
scp -i ~/.ssh/your_ssh_key \
|
||||
admin.kubeconfig kube-controller-manager.kubeconfig kube-scheduler.kubeconfig \
|
||||
ubuntu@${masternode}:~/
|
||||
done
|
||||
```
|
||||
|
||||
Next: [Generating the Data Encryption Config and Key](06-data-encryption-keys.md)
|
||||
Next: [Generating the Data Encryption Config and Key](06-data-encryption-keys.md)
|
|
@ -9,7 +9,7 @@ In this lab you will generate an encryption key and an [encryption config](https
|
|||
Generate an encryption key:
|
||||
|
||||
```
|
||||
ENCRYPTION_KEY=$(head -c 32 /dev/urandom | base64)
|
||||
$ ENCRYPTION_KEY=$(head -c 32 /dev/urandom | base64)
|
||||
```
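Optionally, confirm that the decoded key really is 32 bytes. This is a quick sanity check, not required by the tutorial:

```
$ echo -n $ENCRYPTION_KEY | base64 --decode | wc -c
32
```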
|
||||
|
||||
## The Encryption Config File
|
||||
|
@ -17,7 +17,7 @@ ENCRYPTION_KEY=$(head -c 32 /dev/urandom | base64)
|
|||
Create the `encryption-config.yaml` encryption config file:
|
||||
|
||||
```
|
||||
cat > encryption-config.yaml <<EOF
|
||||
$ cat > encryption-config.yaml <<EOF
|
||||
kind: EncryptionConfig
|
||||
apiVersion: v1
|
||||
resources:
|
||||
|
@ -32,12 +32,21 @@ resources:
|
|||
EOF
|
||||
```
|
||||
|
||||
Copy the `encryption-config.yaml` encryption config file to each controller instance:
|
||||
Copy the `encryption-config.yaml` encryption config file to each master instance:
|
||||
|
||||
```
|
||||
for instance in controller-0 controller-1 controller-2; do
|
||||
gcloud compute scp encryption-config.yaml ${instance}:~/
|
||||
$ aws ec2 describe-instances --filters Name=vpc-id,Values=vpc-xxxxxxxxxxxxxxxxx \
|
||||
--query 'Reservations[].Instances[].[Tags[?Key==`Name`].Value | [0],InstanceId,Placement.AvailabilityZone,PrivateIpAddress,PublicIpAddress,State.Name]' \
|
||||
--output text | sort | grep master
|
||||
master-0 i-xxxxxxxxxxxxxxxxx ap-northeast-1c 10.240.0.10 xx.xxx.xxx.xxx running
|
||||
master-1 i-yyyyyyyyyyyyyyyyy ap-northeast-1c 10.240.0.11 yy.yyy.yyy.yy running
|
||||
master-2 i-zzzzzzzzzzzzzzzzz ap-northeast-1c 10.240.0.12 zz.zzz.z.zzz running
|
||||
|
||||
$ for masternode in xx.xxx.xxx.xxx yy.yyy.yyy.yy zz.zzz.z.zzz; do
|
||||
scp -i ~/.ssh/your_ssh_key \
|
||||
encryption-config.yaml \
|
||||
ubuntu@${masternode}:~/
|
||||
done
|
||||
```
|
||||
|
||||
Next: [Bootstrapping the etcd Cluster](07-bootstrapping-etcd.md)
|
||||
Next: [Bootstrapping the etcd Cluster](07-bootstrapping-etcd.md)
|
|
@ -4,15 +4,21 @@ Kubernetes components are stateless and store cluster state in [etcd](https://gi
|
|||
|
||||
## Prerequisites
|
||||
|
||||
The commands in this lab must be run on each controller instance: `controller-0`, `controller-1`, and `controller-2`. Login to each controller instance using the `gcloud` command. Example:
|
||||
The commands in this lab must be run on each master instance: `master-0`, `master-1`, and `master-2`. Log in to each master instance using the `ssh` command. Example:
|
||||
|
||||
```
|
||||
gcloud compute ssh controller-0
|
||||
$ aws ec2 describe-instances --filters Name=vpc-id,Values=vpc-xxxxxxxxxxxxxxxxx \
|
||||
--query 'Reservations[].Instances[].[Tags[?Key==`Name`].Value | [0],InstanceId,Placement.AvailabilityZone,PrivateIpAddress,PublicIpAddress,State.Name]' \
|
||||
--output text | sort | grep master
|
||||
master-0 i-xxxxxxxxxxxxxxxxx ap-northeast-1c 10.240.0.10 xx.xxx.xxx.xxx running
|
||||
...
|
||||
|
||||
$ ssh -i ~/.ssh/your_ssh_key ubuntu@xx.xxx.xxx.xxx
|
||||
```
|
||||
|
||||
### Running commands in parallel with tmux
|
||||
|
||||
[tmux](https://github.com/tmux/tmux/wiki) can be used to run commands on multiple compute instances at the same time. See the [Running commands in parallel with tmux](01-prerequisites.md#running-commands-in-parallel-with-tmux) section in the Prerequisites lab.
|
||||
[tmux](https://github.com/tmux/tmux/wiki) can be used to run commands on multiple EC2 instances at the same time. See the [Running commands in parallel with tmux](01-prerequisites.md#running-commands-in-parallel-with-tmux) section in the Prerequisites lab.
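For example, with one pane open per master node in the same tmux window, you can toggle synchronized input so each command is sent to all panes at once (a minimal sketch; see the Prerequisites lab for the full workflow):

```
tmux set-window-option synchronize-panes on
```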
|
||||
|
||||
## Bootstrapping an etcd Cluster Member
|
||||
|
||||
|
@ -21,45 +27,64 @@ gcloud compute ssh controller-0
|
|||
Download the official etcd release binaries from the [etcd](https://github.com/etcd-io/etcd) GitHub project:
|
||||
|
||||
```
|
||||
wget -q --show-progress --https-only --timestamping \
|
||||
master-x $ wget -q --show-progress --https-only --timestamping \
|
||||
"https://github.com/etcd-io/etcd/releases/download/v3.4.0/etcd-v3.4.0-linux-amd64.tar.gz"
|
||||
```
|
||||
|
||||
Extract and install the `etcd` server and the `etcdctl` command line utility:
|
||||
|
||||
```
|
||||
{
|
||||
tar -xvf etcd-v3.4.0-linux-amd64.tar.gz
|
||||
sudo mv etcd-v3.4.0-linux-amd64/etcd* /usr/local/bin/
|
||||
}
|
||||
master-x $ tar -xvf etcd-v3.4.0-linux-amd64.tar.gz
|
||||
master-x $ sudo mv etcd-v3.4.0-linux-amd64/etcd* /usr/local/bin/
|
||||
```
|
||||
|
||||
Results:
|
||||
|
||||
```
|
||||
master-x $ ls /usr/local/bin/
|
||||
etcd etcdctl
|
||||
```
|
||||
|
||||
### Configure the etcd Server
|
||||
|
||||
```
|
||||
{
|
||||
sudo mkdir -p /etc/etcd /var/lib/etcd
|
||||
sudo cp ca.pem kubernetes-key.pem kubernetes.pem /etc/etcd/
|
||||
}
|
||||
master-x $ sudo mkdir -p /etc/etcd /var/lib/etcd
|
||||
master-x $ sudo cp ca.pem kubernetes-key.pem kubernetes.pem /etc/etcd/
|
||||
```
|
||||
|
||||
The instance internal IP address will be used to serve client requests and communicate with etcd cluster peers. Retrieve the internal IP address for the current compute instance:
|
||||
The instance internal IP address will be used to serve client requests and communicate with etcd cluster peers. Retrieve the internal IP address for the current EC2 instance via [instance metadata on EC2 instances](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html):
|
||||
|
||||
Example:
|
||||
```
|
||||
INTERNAL_IP=$(curl -s -H "Metadata-Flavor: Google" \
|
||||
http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/0/ip)
|
||||
master-0 $ curl 169.254.169.254/latest/meta-data/local-ipv4
|
||||
10.240.0.10
|
||||
```
|
||||
|
||||
Each etcd member must have a unique name within an etcd cluster. Set the etcd name to match the hostname of the current compute instance:
|
||||
Set the `INTERNAL_IP` environment variable:
|
||||
|
||||
```
|
||||
ETCD_NAME=$(hostname -s)
|
||||
master-x $ INTERNAL_IP=$(curl 169.254.169.254/latest/meta-data/local-ipv4)
|
||||
```
|
||||
|
||||
Each etcd member must have a unique name within an etcd cluster. Set the etcd name to match the hostname of the current EC2 instance:
|
||||
|
||||
Example:
|
||||
|
||||
```
|
||||
master-0 $ hostname -s
|
||||
master-0
|
||||
```
|
||||
|
||||
Set the `ETCD_NAME` environment variable:
|
||||
|
||||
```
|
||||
master-x $ ETCD_NAME=$(hostname -s)
|
||||
```
|
||||
|
||||
Create the `etcd.service` systemd unit file:
|
||||
|
||||
```
|
||||
cat <<EOF | sudo tee /etc/systemd/system/etcd.service
|
||||
master-x $ cat <<EOF | sudo tee /etc/systemd/system/etcd.service
|
||||
[Unit]
|
||||
Description=etcd
|
||||
Documentation=https://github.com/coreos
|
||||
|
@ -81,7 +106,7 @@ ExecStart=/usr/local/bin/etcd \\
|
|||
--listen-client-urls https://${INTERNAL_IP}:2379,https://127.0.0.1:2379 \\
|
||||
--advertise-client-urls https://${INTERNAL_IP}:2379 \\
|
||||
--initial-cluster-token etcd-cluster-0 \\
|
||||
--initial-cluster controller-0=https://10.240.0.10:2380,controller-1=https://10.240.0.11:2380,controller-2=https://10.240.0.12:2380 \\
|
||||
--initial-cluster master-0=https://10.240.0.10:2380,master-1=https://10.240.0.11:2380,master-2=https://10.240.0.12:2380 \\
|
||||
--initial-cluster-state new \\
|
||||
--data-dir=/var/lib/etcd
|
||||
Restart=on-failure
|
||||
|
@ -95,33 +120,37 @@ EOF
|
|||
### Start the etcd Server
|
||||
|
||||
```
|
||||
{
|
||||
sudo systemctl daemon-reload
|
||||
sudo systemctl enable etcd
|
||||
sudo systemctl start etcd
|
||||
}
|
||||
master-x $ sudo systemctl daemon-reload
|
||||
master-x $ sudo systemctl enable etcd
|
||||
master-x $ sudo systemctl start etcd
|
||||
```
|
||||
|
||||
> Remember to run the above commands on each controller node: `controller-0`, `controller-1`, and `controller-2`.
|
||||
> Remember to run the above commands on each master node: `master-0`, `master-1`, and `master-2`.
|
||||
|
||||
Verify that the etcd server is running as a systemd service:
|
||||
|
||||
```
|
||||
master-x $ systemctl status etcd.service
|
||||
● etcd.service - etcd
|
||||
Loaded: loaded (/etc/systemd/system/etcd.service; enabled; vendor preset: enabled)
|
||||
Active: active (running) since Mon 2020-01-20 18:01:29 UTC; 21s ago
|
||||
...
|
||||
```
|
||||
|
||||
## Verification
|
||||
|
||||
List the etcd cluster members:
|
||||
|
||||
```
|
||||
sudo ETCDCTL_API=3 etcdctl member list \
|
||||
master-0 $ sudo ETCDCTL_API=3 etcdctl member list \
|
||||
--endpoints=https://127.0.0.1:2379 \
|
||||
--cacert=/etc/etcd/ca.pem \
|
||||
--cert=/etc/etcd/kubernetes.pem \
|
||||
--key=/etc/etcd/kubernetes-key.pem
|
||||
|
||||
3a57933972cb5131, started, master-2, https://10.240.0.12:2380, https://10.240.0.12:2379, false
|
||||
f98dc20bce6225a0, started, master-0, https://10.240.0.10:2380, https://10.240.0.10:2379, false
|
||||
ffed16798470cab5, started, master-1, https://10.240.0.11:2380, https://10.240.0.11:2379, false
|
||||
```
|
||||
|
||||
> output
|
||||
|
||||
```
|
||||
3a57933972cb5131, started, controller-2, https://10.240.0.12:2380, https://10.240.0.12:2379
|
||||
f98dc20bce6225a0, started, controller-0, https://10.240.0.10:2380, https://10.240.0.10:2379
|
||||
ffed16798470cab5, started, controller-1, https://10.240.0.11:2380, https://10.240.0.11:2379
|
||||
```
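You can also ask etcd for endpoint health from any master node. This is an optional check; the TLS flags mirror the `member list` command above, and the timing in the output will vary:

```
master-0 $ sudo ETCDCTL_API=3 etcdctl endpoint health \
  --endpoints=https://127.0.0.1:2379 \
  --cacert=/etc/etcd/ca.pem \
  --cert=/etc/etcd/kubernetes.pem \
  --key=/etc/etcd/kubernetes-key.pem
https://127.0.0.1:2379 is healthy: successfully committed proposal: took = ...
```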
|
||||
|
||||
Next: [Bootstrapping the Kubernetes Control Plane](08-bootstrapping-kubernetes-controllers.md)
|
||||
Next: [Bootstrapping the Kubernetes Control Plane](08-bootstrapping-kubernetes-controllers.md)
|
|
@ -1,33 +1,39 @@
|
|||
# Bootstrapping the Kubernetes Control Plane
|
||||
|
||||
In this lab you will bootstrap the Kubernetes control plane across three compute instances and configure it for high availability. You will also create an external load balancer that exposes the Kubernetes API Servers to remote clients. The following components will be installed on each node: Kubernetes API Server, Scheduler, and Controller Manager.
|
||||
In this lab you will bootstrap the Kubernetes control plane across three EC2 instances and configure it for high availability. You will also create an external load balancer that exposes the Kubernetes API Servers to remote clients. The following components will be installed on each node: Kubernetes API Server, Scheduler, and Controller Manager.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
The commands in this lab must be run on each controller instance: `controller-0`, `controller-1`, and `controller-2`. Login to each controller instance using the `gcloud` command. Example:
|
||||
The commands in this lab must be run on each master instance: `master-0`, `master-1`, and `master-2`. Log in to each master instance using `ssh`. Example:
|
||||
|
||||
```
|
||||
gcloud compute ssh controller-0
|
||||
$ aws ec2 describe-instances --filters Name=vpc-id,Values=vpc-xxxxxxxxxxxxxxxxx \
|
||||
--query 'Reservations[].Instances[].[Tags[?Key==`Name`].Value | [0],InstanceId,Placement.AvailabilityZone,PrivateIpAddress,PublicIpAddress,State.Name]' \
|
||||
--output text | sort | grep master
|
||||
master-0 i-xxxxxxxxxxxxxxxxx ap-northeast-1c 10.240.0.10 xx.xxx.xxx.xxx running
|
||||
...
|
||||
|
||||
$ ssh -i ~/.ssh/your_ssh_key ubuntu@xx.xxx.xxx.xxx
|
||||
```
|
||||
|
||||
### Running commands in parallel with tmux
|
||||
|
||||
[tmux](https://github.com/tmux/tmux/wiki) can be used to run commands on multiple compute instances at the same time. See the [Running commands in parallel with tmux](01-prerequisites.md#running-commands-in-parallel-with-tmux) section in the Prerequisites lab.
|
||||
[tmux](https://github.com/tmux/tmux/wiki) can be used to run commands on multiple EC2 instances at the same time. See the [Running commands in parallel with tmux](01-prerequisites.md#running-commands-in-parallel-with-tmux) section in the Prerequisites lab.
|
||||
|
||||
## Provision the Kubernetes Control Plane
|
||||
|
||||
Create the Kubernetes configuration directory:
|
||||
|
||||
```
|
||||
sudo mkdir -p /etc/kubernetes/config
|
||||
master-x $ sudo mkdir -p /etc/kubernetes/config
|
||||
```
|
||||
|
||||
### Download and Install the Kubernetes Controller Binaries
|
||||
|
||||
Download the official Kubernetes release binaries:
|
||||
Download the official Kubernetes release binaries - `kube-apiserver`, `kube-controller-manager`, `kube-scheduler`, and `kubectl`:
|
||||
|
||||
```
|
||||
wget -q --show-progress --https-only --timestamping \
|
||||
master-x $ wget -q --show-progress --https-only --timestamping \
|
||||
"https://storage.googleapis.com/kubernetes-release/release/v1.15.3/bin/linux/amd64/kube-apiserver" \
|
||||
"https://storage.googleapis.com/kubernetes-release/release/v1.15.3/bin/linux/amd64/kube-controller-manager" \
|
||||
"https://storage.googleapis.com/kubernetes-release/release/v1.15.3/bin/linux/amd64/kube-scheduler" \
|
||||
|
@ -37,35 +43,30 @@ wget -q --show-progress --https-only --timestamping \
|
|||
Install the Kubernetes binaries:
|
||||
|
||||
```
|
||||
{
|
||||
chmod +x kube-apiserver kube-controller-manager kube-scheduler kubectl
|
||||
sudo mv kube-apiserver kube-controller-manager kube-scheduler kubectl /usr/local/bin/
|
||||
}
|
||||
master-x $ chmod +x kube-apiserver kube-controller-manager kube-scheduler kubectl
|
||||
master-x $ sudo mv kube-apiserver kube-controller-manager kube-scheduler kubectl /usr/local/bin/
|
||||
```
|
||||
|
||||
### Configure the Kubernetes API Server
|
||||
|
||||
```
|
||||
{
|
||||
sudo mkdir -p /var/lib/kubernetes/
|
||||
master-x $ sudo mkdir -p /var/lib/kubernetes/
|
||||
|
||||
sudo mv ca.pem ca-key.pem kubernetes-key.pem kubernetes.pem \
|
||||
service-account-key.pem service-account.pem \
|
||||
encryption-config.yaml /var/lib/kubernetes/
|
||||
}
|
||||
master-x $ sudo mv ca.pem ca-key.pem kubernetes-key.pem kubernetes.pem \
|
||||
service-account-key.pem service-account.pem \
|
||||
encryption-config.yaml /var/lib/kubernetes/
|
||||
```
|
||||
|
||||
The instance internal IP address will be used to advertise the API Server to members of the cluster. Retrieve the internal IP address for the current compute instance:
|
||||
The instance internal IP address will be used to advertise the API Server to members of the cluster. Retrieve the internal IP address for the current EC2 instance:
|
||||
|
||||
```
|
||||
INTERNAL_IP=$(curl -s -H "Metadata-Flavor: Google" \
|
||||
http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/0/ip)
|
||||
master-x $ INTERNAL_IP=$(curl 169.254.169.254/latest/meta-data/local-ipv4)
|
||||
```
|
||||
|
||||
Create the `kube-apiserver.service` systemd unit file:
|
||||
|
||||
```
|
||||
cat <<EOF | sudo tee /etc/systemd/system/kube-apiserver.service
|
||||
master-x $ cat <<EOF | sudo tee /etc/systemd/system/kube-apiserver.service
|
||||
[Unit]
|
||||
Description=Kubernetes API Server
|
||||
Documentation=https://github.com/kubernetes/kubernetes
|
||||
|
@ -113,13 +114,13 @@ EOF
|
|||
Move the `kube-controller-manager` kubeconfig into place:
|
||||
|
||||
```
|
||||
sudo mv kube-controller-manager.kubeconfig /var/lib/kubernetes/
|
||||
master-x $ sudo mv kube-controller-manager.kubeconfig /var/lib/kubernetes/
|
||||
```
|
||||
|
||||
Create the `kube-controller-manager.service` systemd unit file:
|
||||
|
||||
```
|
||||
cat <<EOF | sudo tee /etc/systemd/system/kube-controller-manager.service
|
||||
master-x $ cat <<EOF | sudo tee /etc/systemd/system/kube-controller-manager.service
|
||||
[Unit]
|
||||
Description=Kubernetes Controller Manager
|
||||
Documentation=https://github.com/kubernetes/kubernetes
|
||||
|
@ -151,13 +152,13 @@ EOF
|
|||
Move the `kube-scheduler` kubeconfig into place:
|
||||
|
||||
```
|
||||
sudo mv kube-scheduler.kubeconfig /var/lib/kubernetes/
|
||||
master-x $ sudo mv kube-scheduler.kubeconfig /var/lib/kubernetes/
|
||||
```
|
||||
|
||||
Create the `kube-scheduler.yaml` configuration file:
|
||||
|
||||
```
|
||||
cat <<EOF | sudo tee /etc/kubernetes/config/kube-scheduler.yaml
|
||||
master-x $ cat <<EOF | sudo tee /etc/kubernetes/config/kube-scheduler.yaml
|
||||
apiVersion: kubescheduler.config.k8s.io/v1alpha1
|
||||
kind: KubeSchedulerConfiguration
|
||||
clientConnection:
|
||||
|
@ -170,7 +171,7 @@ EOF
|
|||
Create the `kube-scheduler.service` systemd unit file:
|
||||
|
||||
```
|
||||
cat <<EOF | sudo tee /etc/systemd/system/kube-scheduler.service
|
||||
master-x $ cat <<EOF | sudo tee /etc/systemd/system/kube-scheduler.service
|
||||
[Unit]
|
||||
Description=Kubernetes Scheduler
|
||||
Documentation=https://github.com/kubernetes/kubernetes
|
||||
|
@ -190,30 +191,47 @@ EOF
|
|||
### Start the Controller Services
|
||||
|
||||
```
|
||||
{
|
||||
sudo systemctl daemon-reload
|
||||
sudo systemctl enable kube-apiserver kube-controller-manager kube-scheduler
|
||||
sudo systemctl start kube-apiserver kube-controller-manager kube-scheduler
|
||||
}
|
||||
$ sudo systemctl daemon-reload
|
||||
$ sudo systemctl enable kube-apiserver kube-controller-manager kube-scheduler
|
||||
$ sudo systemctl start kube-apiserver kube-controller-manager kube-scheduler
|
||||
```
|
||||
|
||||
> Allow up to 10 seconds for the Kubernetes API Server to fully initialize.
|
||||
|
||||
Verify that the controller services are running:
|
||||
|
||||
```
|
||||
master-x $ for svc in kube-apiserver kube-controller-manager kube-scheduler; \
|
||||
do sudo systemctl status --no-pager $svc | grep -B 3 Active; \
|
||||
done
|
||||
● kube-apiserver.service - Kubernetes API Server
|
||||
Loaded: loaded (/etc/systemd/system/kube-apiserver.service; enabled; vendor preset: enabled)
|
||||
Active: active (running) since Tue 2020-01-21 11:05:50 UTC; 3h 39min ago
|
||||
● kube-controller-manager.service - Kubernetes Controller Manager
|
||||
Loaded: loaded (/etc/systemd/system/kube-controller-manager.service; enabled; vendor preset: enabled)
|
||||
Active: active (running) since Tue 2020-01-21 11:05:50 UTC; 3h 39min ago
|
||||
● kube-scheduler.service - Kubernetes Scheduler
|
||||
Loaded: loaded (/etc/systemd/system/kube-scheduler.service; enabled; vendor preset: enabled)
|
||||
Active: active (running) since Tue 2020-01-21 11:05:50 UTC; 3h 39min ago
|
||||
```
|
||||
|
||||
### Enable HTTP Health Checks
|
||||
|
||||
A [Google Network Load Balancer](https://cloud.google.com/compute/docs/load-balancing/network) will be used to distribute traffic across the three API servers and allow each API server to terminate TLS connections and validate client certificates. The network load balancer only supports HTTP health checks which means the HTTPS endpoint exposed by the API server cannot be used. As a workaround the nginx webserver can be used to proxy HTTP health checks. In this section nginx will be installed and configured to accept HTTP health checks on port `80` and proxy the connections to the API server on `https://127.0.0.1:6443/healthz`.
|
||||
An AWS [Network Load Balancer (NLB)](https://docs.aws.amazon.com/elasticloadbalancing/latest/network/introduction.html) will be used to distribute traffic across the three API servers and allow each API server to terminate TLS connections and validate client certificates. The NLB health checks use plain HTTP rather than the HTTPS endpoint exposed by the API server, so the nginx web server is used to proxy the health checks. In this section nginx will be installed and configured to accept HTTP health checks on port `80` and proxy the connections to the API server on `https://127.0.0.1:6443/healthz`.
|
||||
|
||||
> The `/healthz` API server endpoint does not require authentication by default.
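You can verify this directly on a master node once the API server is up. This is an optional check; `-k` skips server certificate verification since we are hitting `127.0.0.1` rather than a name on the certificate:

```
master-x $ curl -k https://127.0.0.1:6443/healthz
ok
```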
|
||||
|
||||
Install a basic web server to handle HTTP health checks:
|
||||
|
||||
```
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y nginx
|
||||
master-x $ sudo apt-get update
|
||||
master-x $ sudo apt-get install -y nginx
|
||||
```
|
||||
|
||||
Configure nginx to proxy the HTTP health check:
|
||||
|
||||
```
|
||||
cat > kubernetes.default.svc.cluster.local <<EOF
|
||||
master-x $ cat > kubernetes.default.svc.cluster.local <<EOF
|
||||
server {
|
||||
listen 80;
|
||||
server_name kubernetes.default.svc.cluster.local;
|
||||
|
@ -224,32 +242,30 @@ server {
|
|||
}
|
||||
}
|
||||
EOF
|
||||
|
||||
master-x $ sudo mv kubernetes.default.svc.cluster.local \
|
||||
/etc/nginx/sites-available/kubernetes.default.svc.cluster.local
|
||||
|
||||
master-x $ sudo ln -s /etc/nginx/sites-available/kubernetes.default.svc.cluster.local /etc/nginx/sites-enabled/
|
||||
```
|
||||
|
||||
```
|
||||
{
|
||||
sudo mv kubernetes.default.svc.cluster.local \
|
||||
/etc/nginx/sites-available/kubernetes.default.svc.cluster.local
|
||||
|
||||
sudo ln -s /etc/nginx/sites-available/kubernetes.default.svc.cluster.local /etc/nginx/sites-enabled/
|
||||
}
|
||||
```
|
||||
Restart nginx:
|
||||
|
||||
```
|
||||
sudo systemctl restart nginx
|
||||
master-x $ sudo systemctl restart nginx
|
||||
```
|
||||
|
||||
Then enable nginx as a systemd service:
|
||||
|
||||
```
|
||||
sudo systemctl enable nginx
|
||||
master-x $ sudo systemctl enable nginx
|
||||
```
|
||||
|
||||
### Verification
|
||||
|
||||
```
|
||||
kubectl get componentstatuses --kubeconfig admin.kubeconfig
|
||||
```
|
||||
master-x $ kubectl get componentstatuses --kubeconfig admin.kubeconfig
|
||||
|
||||
```
|
||||
NAME STATUS MESSAGE ERROR
|
||||
controller-manager Healthy ok
|
||||
scheduler Healthy ok
|
||||
|
@ -261,13 +277,11 @@ etcd-1 Healthy {"health": "true"}
|
|||
Test the nginx HTTP health check proxy:
|
||||
|
||||
```
|
||||
curl -H "Host: kubernetes.default.svc.cluster.local" -i http://127.0.0.1/healthz
|
||||
```
|
||||
master-x $ curl -H "Host: kubernetes.default.svc.cluster.local" -i http://127.0.0.1/healthz
|
||||
|
||||
```
|
||||
HTTP/1.1 200 OK
|
||||
Server: nginx/1.14.0 (Ubuntu)
|
||||
Date: Sat, 14 Sep 2019 18:34:11 GMT
|
||||
Date: Tue, 21 Jan 2020 14:56:30 GMT
|
||||
Content-Type: text/plain; charset=utf-8
|
||||
Content-Length: 2
|
||||
Connection: keep-alive
|
||||
|
@ -276,24 +290,35 @@ X-Content-Type-Options: nosniff
|
|||
ok
|
||||
```
|
||||
|
||||
> Remember to run the above commands on each controller node: `controller-0`, `controller-1`, and `controller-2`.
|
||||
> Remember to run the above commands on each master node: `master-0`, `master-1`, and `master-2`.
|
||||
|
||||
## RBAC for Kubelet Authorization
|
||||
|
||||
In this section you will configure RBAC permissions to allow the Kubernetes API Server to access the Kubelet API on each worker node. Access to the Kubelet API is required for retrieving metrics, logs, and executing commands in pods.
|
||||
In this section you will configure RBAC permissions to allow the Kubernetes API Server to access the Kubelet API on each worker node (`master<kube-apiserver> --> worker<kubelet>`). Access to the Kubelet API is required for retrieving metrics, logs, and executing commands in pods.
|
||||
|
||||
> This tutorial sets the Kubelet `--authorization-mode` flag to `Webhook`. Webhook mode uses the [SubjectAccessReview](https://kubernetes.io/docs/admin/authorization/#checking-api-access) API to determine authorization.
|
||||
|
||||
The commands in this section will effect the entire cluster and only need to be run once from one of the controller nodes.
|
||||
The commands in this section will affect the entire cluster and only need to be run once, from **one of the** master nodes.
|
||||
|
||||
```
|
||||
gcloud compute ssh controller-0
|
||||
$ aws ec2 describe-instances --filters Name=vpc-id,Values=vpc-xxxxxxxxxxxxxxxxx \
|
||||
--query 'Reservations[].Instances[].[Tags[?Key==`Name`].Value | [0],InstanceId,Placement.AvailabilityZone,PrivateIpAddress,PublicIpAddress,State.Name]' \
|
||||
--output text | sort | grep master
|
||||
master-0 i-xxxxxxxxxxxxxxxxx ap-northeast-1c 10.240.0.10 xx.xxx.xxx.xxx running
|
||||
...
|
||||
|
||||
$ ssh -i ~/.ssh/your_ssh_key ubuntu@xx.xxx.xxx.xxx
|
||||
```
|
||||
|
||||
Create the `system:kube-apiserver-to-kubelet` [ClusterRole](https://kubernetes.io/docs/admin/authorization/rbac/#role-and-clusterrole) with permissions to access the Kubelet API and perform most common tasks associated with managing pods:
|
||||
Create the `system:kube-apiserver-to-kubelet` [ClusterRole](https://kubernetes.io/docs/admin/authorization/rbac/#role-and-clusterrole) with permissions to access the Kubelet API and perform most common tasks associated with managing pods.
|
||||
|
||||
> NOTE: turn off tmux synchronized panes when executing the following command, and run it on only one master node, since `ClusterRole` is a cluster-wide resource.
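A minimal way to do that, assuming you enabled synchronized panes earlier:

```
tmux set-window-option synchronize-panes off
```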
|
||||
|
||||
```
|
||||
cat <<EOF | kubectl apply --kubeconfig admin.kubeconfig -f -
|
||||
master-0 $ hostname
|
||||
master-0
|
||||
|
||||
master-0 $ cat <<EOF | kubectl apply --kubeconfig admin.kubeconfig -f -
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
|
@ -321,7 +346,7 @@ The Kubernetes API Server authenticates to the Kubelet as the `kubernetes` user
|
|||
Bind the `system:kube-apiserver-to-kubelet` ClusterRole to the `kubernetes` user:
|
||||
|
||||
```
|
||||
cat <<EOF | kubectl apply --kubeconfig admin.kubeconfig -f -
|
||||
master-0 $ cat <<EOF | kubectl apply --kubeconfig admin.kubeconfig -f -
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
|
@ -338,68 +363,80 @@ subjects:
|
|||
EOF
|
||||
```
|
||||
|
||||
Verify:
|
||||
|
||||
```
|
||||
master-0 $ kubectl get clusterrole,clusterrolebinding | grep kube-apiserver
|
||||
clusterrole.rbac.authorization.k8s.io/system:kube-apiserver-to-kubelet 2m2s
|
||||
clusterrolebinding.rbac.authorization.k8s.io/system:kube-apiserver 112s
|
||||
```
|
||||
|
||||
## The Kubernetes Frontend Load Balancer
|
||||
|
||||
In this section you will provision an external load balancer to front the Kubernetes API Servers. The `kubernetes-the-hard-way` static IP address will be attached to the resulting load balancer.
|
||||
|
||||
> The compute instances created in this tutorial will not have permission to complete this section. **Run the following commands from the same machine used to create the compute instances**.
|
||||
|
||||
In this section you will provision an external (internet-facing) [Network Load Balancer](https://docs.aws.amazon.com/elasticloadbalancing/latest/network/introduction.html) in your AWS account to front the Kubernetes API Servers. The `eip-kubernetes-the-hard-way` static IP address will be attached to the resulting load balancer.
|
||||
|
||||
### Provision a Network Load Balancer
|
||||
|
||||
Create the external load balancer network resources:
|
||||
Create the external (internet-facing) Network Load Balancer resources:
|
||||
|
||||
Reference: [cloudformation/hard-k8s-nlb.cfn.yml](../cloudformation/hard-k8s-nlb.cfn.yml)
|
||||
```yaml
|
||||
Resources:
|
||||
HardK8sNLB:
|
||||
Type: AWS::ElasticLoadBalancingV2::LoadBalancer
|
||||
Properties:
|
||||
Type: network
|
||||
Scheme: internet-facing
|
||||
SubnetMappings:
|
||||
- AllocationId: !ImportValue hard-k8s-eipalloc
|
||||
SubnetId: !ImportValue hard-k8s-subnet
|
||||
|
||||
HardK8sListener:
|
||||
Type: AWS::ElasticLoadBalancingV2::Listener
|
||||
Properties:
|
||||
DefaultActions:
|
||||
- TargetGroupArn: !Ref HardK8sTargetGroup
|
||||
Type: forward
|
||||
LoadBalancerArn: !Ref HardK8sNLB
|
||||
Port: 6443
|
||||
Protocol: TCP
|
||||
|
||||
HardK8sTargetGroup:
|
||||
Type: AWS::ElasticLoadBalancingV2::TargetGroup
|
||||
Properties:
|
||||
VpcId: !ImportValue hard-k8s-vpc
|
||||
Protocol: TCP
|
||||
Port: 6443
|
||||
Targets:
|
||||
- Id: !ImportValue hard-k8s-master-0
|
||||
- Id: !ImportValue hard-k8s-master-1
|
||||
- Id: !ImportValue hard-k8s-master-2
|
||||
HealthCheckPort: "80" # default is "traffic-port", which means 6443.
|
||||
```
|
||||
|
||||
Create the NLB via CloudFormation:
|
||||
|
||||
```
|
||||
{
|
||||
KUBERNETES_PUBLIC_ADDRESS=$(gcloud compute addresses describe kubernetes-the-hard-way \
|
||||
--region $(gcloud config get-value compute/region) \
|
||||
--format 'value(address)')
|
||||
|
||||
gcloud compute http-health-checks create kubernetes \
|
||||
--description "Kubernetes Health Check" \
|
||||
--host "kubernetes.default.svc.cluster.local" \
|
||||
--request-path "/healthz"
|
||||
|
||||
gcloud compute firewall-rules create kubernetes-the-hard-way-allow-health-check \
|
||||
--network kubernetes-the-hard-way \
|
||||
--source-ranges 209.85.152.0/22,209.85.204.0/22,35.191.0.0/16 \
|
||||
--allow tcp
|
||||
|
||||
gcloud compute target-pools create kubernetes-target-pool \
|
||||
--http-health-check kubernetes
|
||||
|
||||
gcloud compute target-pools add-instances kubernetes-target-pool \
|
||||
--instances controller-0,controller-1,controller-2
|
||||
|
||||
gcloud compute forwarding-rules create kubernetes-forwarding-rule \
|
||||
--address ${KUBERNETES_PUBLIC_ADDRESS} \
|
||||
--ports 6443 \
|
||||
--region $(gcloud config get-value compute/region) \
|
||||
--target-pool kubernetes-target-pool
|
||||
}
|
||||
$ aws cloudformation create-stack \
|
||||
--stack-name hard-k8s-nlb \
|
||||
--template-body file://cloudformation/hard-k8s-nlb.cfn.yml
|
||||
```
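Optionally, wait for the stack to finish and confirm the NLB is active before moving on. This is a sketch; the load balancer name and DNS name are generated by CloudFormation and will differ in your account:

```
$ aws cloudformation wait stack-create-complete --stack-name hard-k8s-nlb

$ aws elbv2 describe-load-balancers \
    --query 'LoadBalancers[?Type==`network`].[LoadBalancerName,State.Code,DNSName]' \
    --output text
hard-xxxx    active    hard-xxxx.elb.ap-northeast-1.amazonaws.com
```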
|
||||
|
||||
### Verification
|
||||
|
||||
> The EC2 instances created in this tutorial will not have permission to complete this section. **Run the following commands from the same machine used to create the EC2 instances**.
|
||||
|
||||
Retrieve the `kubernetes-the-hard-way` static IP address:
|
||||
Retrieve the `eip-kubernetes-the-hard-way` Elastic IP address:
|
||||
|
||||
```
|
||||
KUBERNETES_PUBLIC_ADDRESS=$(gcloud compute addresses describe kubernetes-the-hard-way \
|
||||
--region $(gcloud config get-value compute/region) \
|
||||
--format 'value(address)')
|
||||
$ KUBERNETES_PUBLIC_ADDRESS=$(aws ec2 describe-addresses \
|
||||
--filters "Name=tag:Name,Values=eip-kubernetes-the-hard-way" \
|
||||
--query 'Addresses[0].PublicIp' --output text)
|
||||
```
|
||||
|
||||
Make a HTTP request for the Kubernetes version info:
|
||||
|
||||
```
|
||||
curl --cacert ca.pem https://${KUBERNETES_PUBLIC_ADDRESS}:6443/version
|
||||
```
|
||||
|
||||
> output
|
||||
This EIP is attached to the NLB we've just created. Make an HTTP request for the Kubernetes version info:
|
||||
|
||||
```
|
||||
$ curl --cacert ca.pem https://${KUBERNETES_PUBLIC_ADDRESS}:6443/version
|
||||
{
|
||||
"major": "1",
|
||||
"minor": "15",
|
||||
|
@ -410,7 +447,16 @@ curl --cacert ca.pem https://${KUBERNETES_PUBLIC_ADDRESS}:6443/version
|
|||
"goVersion": "go1.12.9",
|
||||
"compiler": "gc",
|
||||
"platform": "linux/amd64"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Next: [Bootstrapping the Kubernetes Worker Nodes](09-bootstrapping-kubernetes-workers.md)
|
||||
We've now provisioned the master nodes for our k8s cluster, but there are no worker nodes registered in it yet.
|
||||
|
||||
```
|
||||
master-0 $ kubectl --kubeconfig admin.kubeconfig get nodes
|
||||
No resources found.
|
||||
```
|
||||
|
||||
Let's configure them next.
|
||||
|
||||
Next: [Bootstrapping the Kubernetes Worker Nodes](09-bootstrapping-kubernetes-workers.md)
|
|
@ -7,22 +7,26 @@ In this lab you will bootstrap three Kubernetes worker nodes. The following comp
|
|||
The commands in this lab must be run on each worker instance: `worker-0`, `worker-1`, and `worker-2`. Log in to each worker instance using the `ssh` command. Example:
|
||||
|
||||
```
|
||||
gcloud compute ssh worker-0
|
||||
$ aws ec2 describe-instances --filters Name=vpc-id,Values=vpc-xxxxxxxxxxxxxxxxx \
|
||||
--query 'Reservations[].Instances[].[Tags[?Key==`Name`].Value | [0],InstanceId,Placement.AvailabilityZone,PrivateIpAddress,PublicIpAddress,State.Name]' \
|
||||
--output text | sort | grep worker
|
||||
worker-0 i-aaaaaaaaaaaaaaaaa ap-northeast-1c 10.240.0.20 x.xxx.xx.xx running
|
||||
...
|
||||
|
||||
$ ssh -i ~/.ssh/your_ssh_key ubuntu@x.xxx.xx.xx
|
||||
```
|
||||
|
||||
### Running commands in parallel with tmux
|
||||
|
||||
[tmux](https://github.com/tmux/tmux/wiki) can be used to run commands on multiple compute instances at the same time. See the [Running commands in parallel with tmux](01-prerequisites.md#running-commands-in-parallel-with-tmux) section in the Prerequisites lab.
|
||||
[tmux](https://github.com/tmux/tmux/wiki) can be used to run commands on multiple EC2 instances at the same time. See the [Running commands in parallel with tmux](01-prerequisites.md#running-commands-in-parallel-with-tmux) section in the Prerequisites lab.
|
||||
|
||||
## Provisioning a Kubernetes Worker Node
|
||||
|
||||
Install the OS dependencies:
|
||||
|
||||
```
|
||||
{
|
||||
sudo apt-get update
|
||||
sudo apt-get -y install socat conntrack ipset
|
||||
}
|
||||
worker-x $ sudo apt-get update
|
||||
worker-x $ sudo apt-get -y install socat conntrack ipset
|
||||
```
|
||||
|
||||
> The socat binary enables support for the `kubectl port-forward` command.
|
||||
|
@ -34,13 +38,13 @@ By default the kubelet will fail to start if [swap](https://help.ubuntu.com/comm
|
|||
Verify if swap is enabled:
|
||||
|
||||
```
|
||||
sudo swapon --show
|
||||
worker-x $ sudo swapon --show
|
||||
```
|
||||
|
||||
If the output is empty then swap is not enabled. If swap is enabled, run the following command to disable swap immediately:
|
||||
|
||||
```
|
||||
sudo swapoff -a
|
||||
worker-x $ sudo swapoff -a
|
||||
```
|
||||
|
||||
> To ensure swap remains off after reboot consult your Linux distro documentation.
|
||||
|
@ -48,7 +52,7 @@ sudo swapoff -a
|
|||
### Download and Install Worker Binaries
|
||||
|
||||
```
|
||||
wget -q --show-progress --https-only --timestamping \
|
||||
worker-x $ wget -q --show-progress --https-only --timestamping \
|
||||
https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.15.0/crictl-v1.15.0-linux-amd64.tar.gz \
|
||||
https://github.com/opencontainers/runc/releases/download/v1.0.0-rc8/runc.amd64 \
|
||||
https://github.com/containernetworking/plugins/releases/download/v0.8.2/cni-plugins-linux-amd64-v0.8.2.tgz \
|
||||
|
@ -61,7 +65,7 @@ wget -q --show-progress --https-only --timestamping \
|
|||
Create the installation directories:
|
||||
|
||||
```
|
||||
sudo mkdir -p \
|
||||
worker-x $ sudo mkdir -p \
|
||||
/etc/cni/net.d \
|
||||
/opt/cni/bin \
|
||||
/var/lib/kubelet \
|
||||
|
@ -73,31 +77,49 @@ sudo mkdir -p \
|
|||
Install the worker binaries:
|
||||
|
||||
```
|
||||
{
|
||||
mkdir containerd
|
||||
tar -xvf crictl-v1.15.0-linux-amd64.tar.gz
|
||||
tar -xvf containerd-1.2.9.linux-amd64.tar.gz -C containerd
|
||||
sudo tar -xvf cni-plugins-linux-amd64-v0.8.2.tgz -C /opt/cni/bin/
|
||||
sudo mv runc.amd64 runc
|
||||
chmod +x crictl kubectl kube-proxy kubelet runc
|
||||
sudo mv crictl kubectl kube-proxy kubelet runc /usr/local/bin/
|
||||
sudo mv containerd/bin/* /bin/
|
||||
}
|
||||
worker-x $ mkdir containerd
|
||||
worker-x $ tar -xvf crictl-v1.15.0-linux-amd64.tar.gz
|
||||
worker-x $ tar -xvf containerd-1.2.9.linux-amd64.tar.gz -C containerd
|
||||
worker-x $ sudo tar -xvf cni-plugins-linux-amd64-v0.8.2.tgz -C /opt/cni/bin/
|
||||
worker-x $ sudo mv runc.amd64 runc
|
||||
worker-x $ chmod +x crictl kubectl kube-proxy kubelet runc
|
||||
worker-x $ sudo mv crictl kubectl kube-proxy kubelet runc /usr/local/bin/
|
||||
worker-x $ sudo mv containerd/bin/* /bin/
|
||||
```
|
||||
|
||||
Verify:
|
||||
|
||||
```
|
||||
worker-x $ ls /opt/cni/bin/
|
||||
bandwidth bridge dhcp firewall flannel host-device host-local ipvlan loopback macvlan portmap ptp sbr static tuning vlan
|
||||
|
||||
worker-x $ ls /bin/container*
|
||||
/bin/containerd /bin/containerd-shim /bin/containerd-shim-runc-v1 /bin/containerd-stress
|
||||
worker-x $ ls /usr/local/bin/
|
||||
crictl kube-proxy kubectl kubelet runc
|
||||
```
|
||||
|
||||
### Configure CNI Networking
|
||||
|
||||
Retrieve the Pod CIDR range for the current compute instance:
|
||||
Retrieve the Pod CIDR range for the current EC2 instance. Remember that we wrote the Pod CIDR range by executing `echo 10.200.x.0/24 > /opt/pod_cidr.txt` via UserData in [cloudformation/hard-k8s-worker-nodes.cfn.yml](../cloudformation/hard-k8s-worker-nodes.cfn.yml).
|
||||
|
||||
Example:
|
||||
|
||||
```
|
||||
POD_CIDR=$(curl -s -H "Metadata-Flavor: Google" \
|
||||
http://metadata.google.internal/computeMetadata/v1/instance/attributes/pod-cidr)
|
||||
worker-0 $ cat /opt/pod_cidr.txt
|
||||
10.200.0.0/24
|
||||
```
|
||||
|
||||
Save this range in an environment variable named `POD_CIDR`:
|
||||
|
||||
```
|
||||
worker-x $ POD_CIDR=$(cat /opt/pod_cidr.txt)
|
||||
```
|
||||
|
||||
Create the `bridge` network configuration file:
|
||||
|
||||
```
|
||||
cat <<EOF | sudo tee /etc/cni/net.d/10-bridge.conf
|
||||
worker-x $ cat <<EOF | sudo tee /etc/cni/net.d/10-bridge.conf
|
||||
{
|
||||
"cniVersion": "0.3.1",
|
||||
"name": "bridge",
|
||||
|
@ -119,7 +141,7 @@ EOF
|
|||
Create the `loopback` network configuration file:
|
||||
|
||||
```
|
||||
cat <<EOF | sudo tee /etc/cni/net.d/99-loopback.conf
|
||||
worker-x $ cat <<EOF | sudo tee /etc/cni/net.d/99-loopback.conf
|
||||
{
|
||||
"cniVersion": "0.3.1",
|
||||
"name": "lo",
|
||||
|
@ -133,11 +155,11 @@ EOF
|
|||
Create the `containerd` configuration file:
|
||||
|
||||
```
|
||||
sudo mkdir -p /etc/containerd/
|
||||
worker-x $ sudo mkdir -p /etc/containerd/
|
||||
```
|
||||
|
||||
```
|
||||
cat << EOF | sudo tee /etc/containerd/config.toml
|
||||
worker-x $ cat << EOF | sudo tee /etc/containerd/config.toml
|
||||
[plugins]
|
||||
[plugins.cri.containerd]
|
||||
snapshotter = "overlayfs"
|
||||
|
@ -151,7 +173,7 @@ EOF
|
|||
Create the `containerd.service` systemd unit file:
|
||||
|
||||
```
|
||||
cat <<EOF | sudo tee /etc/systemd/system/containerd.service
|
||||
worker-x $ cat <<EOF | sudo tee /etc/systemd/system/containerd.service
|
||||
[Unit]
|
||||
Description=containerd container runtime
|
||||
Documentation=https://containerd.io
|
||||
|
@ -176,18 +198,23 @@ EOF
|
|||
|
||||
### Configure the Kubelet
|
||||
|
||||
Check the `$HOSTNAME` environment variable:
|
||||
|
||||
```
|
||||
{
|
||||
sudo mv ${HOSTNAME}-key.pem ${HOSTNAME}.pem /var/lib/kubelet/
|
||||
sudo mv ${HOSTNAME}.kubeconfig /var/lib/kubelet/kubeconfig
|
||||
sudo mv ca.pem /var/lib/kubernetes/
|
||||
}
|
||||
worker-0 $ echo $HOSTNAME
|
||||
worker-0
|
||||
```
|
||||
|
||||
```
|
||||
worker-x $ sudo mv ${HOSTNAME}-key.pem ${HOSTNAME}.pem /var/lib/kubelet/
|
||||
worker-x $ sudo mv ${HOSTNAME}.kubeconfig /var/lib/kubelet/kubeconfig
|
||||
worker-x $ sudo mv ca.pem /var/lib/kubernetes/
|
||||
```
|
||||
|
||||
Create the `kubelet-config.yaml` configuration file:
|
||||
|
||||
```
|
||||
cat <<EOF | sudo tee /var/lib/kubelet/kubelet-config.yaml
|
||||
worker-x $ cat <<EOF | sudo tee /var/lib/kubelet/kubelet-config.yaml
|
||||
kind: KubeletConfiguration
|
||||
apiVersion: kubelet.config.k8s.io/v1beta1
|
||||
authentication:
|
||||
|
@ -215,7 +242,7 @@ EOF
|
|||
Create the `kubelet.service` systemd unit file:
|
||||
|
||||
```
|
||||
cat <<EOF | sudo tee /etc/systemd/system/kubelet.service
|
||||
worker-x $ cat <<EOF | sudo tee /etc/systemd/system/kubelet.service
|
||||
[Unit]
|
||||
Description=Kubernetes Kubelet
|
||||
Documentation=https://github.com/kubernetes/kubernetes
|
||||
|
@ -240,16 +267,19 @@ WantedBy=multi-user.target
|
|||
EOF
|
||||
```
|
||||
|
||||
See the [kubelet documentation](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/) for details of the kubelet options. Note that `--cni-conf-dir` defaults to `/etc/cni/net.d`, and `--cni-bin-dir` defaults to `/opt/cni/bin`.
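Since this tutorial relies on those defaults, an optional check is to list both directories; `/etc/cni/net.d/` should contain the `10-bridge.conf` and `99-loopback.conf` files created above, and `/opt/cni/bin/` the CNI plugin binaries installed earlier:

```
worker-x $ ls /etc/cni/net.d/ /opt/cni/bin/
```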
|
||||
|
||||
|
||||
### Configure the Kubernetes Proxy
|
||||
|
||||
```
|
||||
sudo mv kube-proxy.kubeconfig /var/lib/kube-proxy/kubeconfig
|
||||
worker-x $ sudo mv kube-proxy.kubeconfig /var/lib/kube-proxy/kubeconfig
|
||||
```
|
||||
|
||||
Create the `kube-proxy-config.yaml` configuration file:
|
||||
|
||||
```
|
||||
cat <<EOF | sudo tee /var/lib/kube-proxy/kube-proxy-config.yaml
|
||||
worker-x $ cat <<EOF | sudo tee /var/lib/kube-proxy/kube-proxy-config.yaml
|
||||
kind: KubeProxyConfiguration
|
||||
apiVersion: kubeproxy.config.k8s.io/v1alpha1
|
||||
clientConnection:
|
||||
|
@ -262,7 +292,7 @@ EOF
|
|||
Create the `kube-proxy.service` systemd unit file:
|
||||
|
||||
```
|
||||
cat <<EOF | sudo tee /etc/systemd/system/kube-proxy.service
|
||||
worker-x $ cat <<EOF | sudo tee /etc/systemd/system/kube-proxy.service
|
||||
[Unit]
|
||||
Description=Kubernetes Kube Proxy
|
||||
Documentation=https://github.com/kubernetes/kubernetes
|
||||
|
@ -281,33 +311,32 @@ EOF
|
|||
### Start the Worker Services
|
||||
|
||||
```
|
||||
{
|
||||
sudo systemctl daemon-reload
|
||||
sudo systemctl enable containerd kubelet kube-proxy
|
||||
sudo systemctl start containerd kubelet kube-proxy
|
||||
}
|
||||
worker-x $ sudo systemctl daemon-reload
|
||||
worker-x $ sudo systemctl enable containerd kubelet kube-proxy
|
||||
worker-x $ sudo systemctl start containerd kubelet kube-proxy
|
||||
```
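Optionally, verify that all three services came up before moving on; `systemctl is-active` should report `active` for each of them:

```
worker-x $ systemctl is-active containerd kubelet kube-proxy
```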
|
||||
|
||||
> Remember to run the above commands on each worker node: `worker-0`, `worker-1`, and `worker-2`.
|
||||
|
||||
## Verification
|
||||
|
||||
> The compute instances created in this tutorial will not have permission to complete this section. Run the following commands from the same machine used to create the compute instances.
|
||||
> The EC2 instances created in this tutorial will not have permission to complete this section. Run the following commands from the same machine used to create the EC2 instances.
|
||||
|
||||
List the registered Kubernetes nodes:
|
||||
|
||||
```
|
||||
gcloud compute ssh controller-0 \
|
||||
--command "kubectl get nodes --kubeconfig admin.kubeconfig"
|
||||
$ aws ec2 describe-instances --filters Name=vpc-id,Values=vpc-xxxxxxxxxxxxxxxxx \
|
||||
--query 'Reservations[].Instances[].[Tags[?Key==`Name`].Value | [0],InstanceId,Placement.AvailabilityZone,PrivateIpAddress,PublicIpAddress,State.Name]' \
|
||||
--output text | sort | grep master-0
|
||||
master-0 i-xxxxxxxxxxxxxxxxx ap-northeast-1d 10.240.0.10 xx.xxx.xx.xx running
|
||||
|
||||
$ ssh -i ~/.ssh/your_ssh_key ubuntu@xx.xxx.xx.xx "kubectl get nodes --kubeconfig admin.kubeconfig"
|
||||
NAME STATUS ROLES AGE VERSION
|
||||
worker-0 Ready <none> 2m18s v1.15.3
|
||||
worker-1 Ready <none> 2m18s v1.15.3
|
||||
worker-2 Ready <none> 2m18s v1.15.3
|
||||
```
|
||||
|
||||
> output
|
||||
Now 3 workers have been registered to the cluster.
|
||||
|
||||
```
|
||||
NAME STATUS ROLES AGE VERSION
|
||||
worker-0 Ready <none> 15s v1.15.3
|
||||
worker-1 Ready <none> 15s v1.15.3
|
||||
worker-2 Ready <none> 15s v1.15.3
|
||||
```
|
||||
|
||||
Next: [Configuring kubectl for Remote Access](10-configuring-kubectl.md)
|
||||
Next: [Configuring kubectl for Remote Access](10-configuring-kubectl.md)
|
|
@ -11,26 +11,24 @@ Each kubeconfig requires a Kubernetes API Server to connect to. To support high
|
|||
Generate a kubeconfig file suitable for authenticating as the `admin` user:
|
||||
|
||||
```
|
||||
{
|
||||
KUBERNETES_PUBLIC_ADDRESS=$(gcloud compute addresses describe kubernetes-the-hard-way \
|
||||
--region $(gcloud config get-value compute/region) \
|
||||
--format 'value(address)')
|
||||
$ KUBERNETES_PUBLIC_ADDRESS=$(aws ec2 describe-addresses \
|
||||
--filters "Name=tag:Name,Values=eip-kubernetes-the-hard-way" \
|
||||
--query 'Addresses[0].PublicIp' --output text)
|
||||
|
||||
kubectl config set-cluster kubernetes-the-hard-way \
|
||||
--certificate-authority=ca.pem \
|
||||
--embed-certs=true \
|
||||
--server=https://${KUBERNETES_PUBLIC_ADDRESS}:6443
|
||||
$ kubectl config set-cluster kubernetes-the-hard-way \
|
||||
--certificate-authority=ca.pem \
|
||||
--embed-certs=true \
|
||||
--server=https://${KUBERNETES_PUBLIC_ADDRESS}:6443
|
||||
|
||||
kubectl config set-credentials admin \
|
||||
--client-certificate=admin.pem \
|
||||
--client-key=admin-key.pem
|
||||
$ kubectl config set-credentials admin \
|
||||
--client-certificate=admin.pem \
|
||||
--client-key=admin-key.pem
|
||||
|
||||
kubectl config set-context kubernetes-the-hard-way \
|
||||
--cluster=kubernetes-the-hard-way \
|
||||
--user=admin
|
||||
$ kubectl config set-context kubernetes-the-hard-way \
|
||||
--cluster=kubernetes-the-hard-way \
|
||||
--user=admin
|
||||
|
||||
kubectl config use-context kubernetes-the-hard-way
|
||||
}
|
||||
$ kubectl config use-context kubernetes-the-hard-way
|
||||
```
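As a quick check, the current context should now be `kubernetes-the-hard-way`:

```
$ kubectl config current-context
kubernetes-the-hard-way
```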
|
||||
|
||||
## Verification
|
||||
|
@ -38,12 +36,7 @@ Generate a kubeconfig file suitable for authenticating as the `admin` user:
|
|||
Check the health of the remote Kubernetes cluster:
|
||||
|
||||
```
|
||||
kubectl get componentstatuses
|
||||
```
|
||||
|
||||
> output
|
||||
|
||||
```
|
||||
$ kubectl get componentstatuses
|
||||
NAME STATUS MESSAGE ERROR
|
||||
controller-manager Healthy ok
|
||||
scheduler Healthy ok
|
||||
|
@ -55,16 +48,11 @@ etcd-0 Healthy {"health":"true"}
|
|||
List the nodes in the remote Kubernetes cluster:
|
||||
|
||||
```
|
||||
kubectl get nodes
|
||||
$ kubectl get nodes
|
||||
NAME STATUS ROLES AGE VERSION
|
||||
worker-0 Ready <none> 6m38s v1.15.3
|
||||
worker-1 Ready <none> 6m38s v1.15.3
|
||||
worker-2 Ready <none> 6m38s v1.15.3
|
||||
```
|
||||
|
||||
> output
|
||||
|
||||
```
|
||||
NAME STATUS ROLES AGE VERSION
|
||||
worker-0 Ready <none> 2m9s v1.15.3
|
||||
worker-1 Ready <none> 2m9s v1.15.3
|
||||
worker-2 Ready <none> 2m9s v1.15.3
|
||||
```
|
||||
|
||||
Next: [Provisioning Pod Network Routes](11-pod-network-routes.md)
|
||||
Next: [Provisioning Pod Network Routes](11-pod-network-routes.md)
|
|
@ -1,60 +1,83 @@
|
|||
# Provisioning Pod Network Routes
|
||||
|
||||
Pods scheduled to a node receive an IP address from the node's Pod CIDR range. At this point pods can not communicate with other pods running on different nodes due to missing network [routes](https://cloud.google.com/compute/docs/vpc/routes).
|
||||
Pods scheduled to a node receive an IP address from the node's Pod CIDR range. At this point pods can not communicate with other pods running on different nodes due to missing [network routes](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Route_Tables.html).
|
||||
|
||||
In this lab you will create a route for each worker node that maps the node's Pod CIDR range to the node's internal IP address.
|
||||
In this lab you will first create a route for each worker node that maps the node's Pod CIDR range to the node's internal IP address.
|
||||
|
||||
> There are [other ways](https://kubernetes.io/docs/concepts/cluster-administration/networking/#how-to-achieve-this) to implement the Kubernetes networking model.
|
||||
|
||||
|
||||
## The Routing Table
|
||||
|
||||
In this section you will gather the information required to create routes in the `kubernetes-the-hard-way` VPC network.
|
||||
In this section you will gather the information required to create new routes in the VPC network. Remember that we have created a route table for the dedicated subnet of the k8s cluster. What we need to do in this section is add new route resources to that existing route table (which we can refer to with `!ImportValue hard-k8s-rtb`).
|
||||
|
||||
Print the internal IP address and Pod CIDR range for each worker instance:
|
||||
Reference: [cloudformation/hard-k8s-pod-routes.cfn.yml](../cloudformation/hard-k8s-pod-routes.cfn.yml)
|
||||
```yaml
|
||||
Resources:
|
||||
RouteWorker0:
|
||||
Type: AWS::EC2::Route
|
||||
Properties:
|
||||
DestinationCidrBlock: 10.200.0.0/24
|
||||
RouteTableId: !ImportValue hard-k8s-rtb
|
||||
InstanceId: !ImportValue hard-k8s-worker-0
|
||||
|
||||
```
|
||||
for instance in worker-0 worker-1 worker-2; do
|
||||
gcloud compute instances describe ${instance} \
|
||||
--format 'value[separator=" "](networkInterfaces[0].networkIP,metadata.items[0].value)'
|
||||
done
|
||||
RouteWorker1:
|
||||
Type: AWS::EC2::Route
|
||||
Properties:
|
||||
DestinationCidrBlock: 10.200.1.0/24
|
||||
RouteTableId: !ImportValue hard-k8s-rtb
|
||||
InstanceId: !ImportValue hard-k8s-worker-1
|
||||
|
||||
RouteWorker2:
|
||||
Type: AWS::EC2::Route
|
||||
Properties:
|
||||
DestinationCidrBlock: 10.200.2.0/24
|
||||
RouteTableId: !ImportValue hard-k8s-rtb
|
||||
InstanceId: !ImportValue hard-k8s-worker-2
|
||||
```
|
||||
|
||||
> output
|
||||
Now create the route resources via the AWS CLI:
|
||||
|
||||
```
|
||||
10.240.0.20 10.200.0.0/24
|
||||
10.240.0.21 10.200.1.0/24
|
||||
10.240.0.22 10.200.2.0/24
|
||||
$ aws cloudformation create-stack \
|
||||
--stack-name hard-k8s-pod-routes \
|
||||
--template-body file://cloudformation/hard-k8s-pod-routes.cfn.yml
|
||||
```
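If you prefer to block until the routes are actually created, you can use the CLI's wait helper (optional):

```
$ aws cloudformation wait stack-create-complete --stack-name hard-k8s-pod-routes
```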
|
||||
|
||||
## Routes
|
||||
|
||||
Create network routes for each worker instance:
|
||||
Verify:
|
||||
|
||||
```
|
||||
for i in 0 1 2; do
|
||||
gcloud compute routes create kubernetes-route-10-200-${i}-0-24 \
|
||||
--network kubernetes-the-hard-way \
|
||||
--next-hop-address 10.240.0.2${i} \
|
||||
--destination-range 10.200.${i}.0/24
|
||||
done
|
||||
$ aws cloudformation describe-stacks \
|
||||
--stack-name hard-k8s-network \
|
||||
--query 'Stacks[0].Outputs' --output table
|
||||
-----------------------------------------------------------------
|
||||
| DescribeStacks |
|
||||
+-----------------+----------------+----------------------------+
|
||||
| ExportName | OutputKey | OutputValue |
|
||||
+-----------------+----------------+----------------------------+
|
||||
| hard-k8s-rtb | RouteTableId | rtb-sssssssssssssssss |
|
||||
| hard-k8s-vpc | VpcId | vpc-ppppppppppppppppp |
|
||||
| hard-k8s-subnet| SubnetId | subnet-qqqqqqqqqqqqqqqqq |
|
||||
+-----------------+----------------+----------------------------+
|
||||
|
||||
$ ROUTE_TABLE_ID=$(aws cloudformation describe-stacks \
|
||||
--stack-name hard-k8s-network \
|
||||
--query 'Stacks[0].Outputs[?ExportName==`hard-k8s-rtb`].OutputValue' --output text)
|
||||
|
||||
$ aws ec2 describe-route-tables \
|
||||
--route-table-ids $ROUTE_TABLE_ID \
|
||||
--query 'RouteTables[0].Routes[].[DestinationCidrBlock,InstanceId,GatewayId]' --output table
|
||||
-------------------------------------------------------------------
|
||||
| DescribeRouteTables |
|
||||
+---------------+-----------------------+-------------------------+
|
||||
| 10.200.0.0/24| i-aaaaaaaaaaaaaaaaa | None | # worker-0
|
||||
| 10.200.1.0/24| i-bbbbbbbbbbbbbbbbb | None | # worker-1
|
||||
| 10.200.2.0/24| i-ccccccccccccccccc | None | # worker-2
|
||||
| 10.240.0.0/16| None | local | # inter-vpc traffic among 10.240.0.0/16 range
|
||||
| 0.0.0.0/0 | None | igw-xxxxxxxxxxxxxxxxx | # default internet gateway
|
||||
+---------------+-----------------------+-------------------------+
|
||||
```
|
||||
|
||||
List the routes in the `kubernetes-the-hard-way` VPC network:
|
||||
This route table ensures that traffic destined for pods running on worker-0, which owns the Pod CIDR range `10.200.0.0/24`, is routed to the worker-0 node.
|
||||
|
||||
```
|
||||
gcloud compute routes list --filter "network: kubernetes-the-hard-way"
|
||||
```
|
||||
|
||||
> output
|
||||
|
||||
```
|
||||
NAME NETWORK DEST_RANGE NEXT_HOP PRIORITY
|
||||
default-route-081879136902de56 kubernetes-the-hard-way 10.240.0.0/24 kubernetes-the-hard-way 1000
|
||||
default-route-55199a5aa126d7aa kubernetes-the-hard-way 0.0.0.0/0 default-internet-gateway 1000
|
||||
kubernetes-route-10-200-0-0-24 kubernetes-the-hard-way 10.200.0.0/24 10.240.0.20 1000
|
||||
kubernetes-route-10-200-1-0-24 kubernetes-the-hard-way 10.200.1.0/24 10.240.0.21 1000
|
||||
kubernetes-route-10-200-2-0-24 kubernetes-the-hard-way 10.200.2.0/24 10.240.0.22 1000
|
||||
```
|
||||
|
||||
Next: [Deploying the DNS Cluster Add-on](12-dns-addon.md)
|
||||
Next: [Deploying the DNS Cluster Add-on](12-dns-addon.md)
|
|
@ -4,15 +4,21 @@ In this lab you will deploy the [DNS add-on](https://kubernetes.io/docs/concepts
|
|||
|
||||
## The DNS Cluster Add-on
|
||||
|
||||
First, check the registered worker nodes:
|
||||
|
||||
```
|
||||
$ kubectl get nodes -o wide
|
||||
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
|
||||
worker-0 Ready <none> 26h v1.15.3 10.240.0.20 <none> Ubuntu 18.04.3 LTS 4.15.0-1051-aws containerd://1.2.9
|
||||
worker-1 Ready <none> 26h v1.15.3 10.240.0.21 <none> Ubuntu 18.04.3 LTS 4.15.0-1051-aws containerd://1.2.9
|
||||
worker-2 Ready <none> 26h v1.15.3 10.240.0.22 <none> Ubuntu 18.04.3 LTS 4.15.0-1051-aws containerd://1.2.9
|
||||
```
|
||||
|
||||
Deploy the `coredns` cluster add-on:
|
||||
|
||||
```
|
||||
kubectl apply -f https://storage.googleapis.com/kubernetes-the-hard-way/coredns.yaml
|
||||
```
|
||||
$ kubectl apply -f https://storage.googleapis.com/kubernetes-the-hard-way/coredns.yaml
|
||||
|
||||
> output
|
||||
|
||||
```
|
||||
serviceaccount/coredns created
|
||||
clusterrole.rbac.authorization.k8s.io/system:coredns created
|
||||
clusterrolebinding.rbac.authorization.k8s.io/system:coredns created
|
||||
|
@ -24,53 +30,46 @@ service/kube-dns created
|
|||
List the pods created by the `kube-dns` deployment:
|
||||
|
||||
```
|
||||
kubectl get pods -l k8s-app=kube-dns -n kube-system
|
||||
$ kubectl get pods -l k8s-app=kube-dns -n kube-system -o wide
|
||||
|
||||
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
|
||||
coredns-5fb99965-gk2j7 1/1 Running 0 98s 10.200.1.3 worker-1 <none> <none>
|
||||
coredns-5fb99965-w6hxj 1/1 Running 0 98s 10.200.2.3 worker-2 <none> <none>
|
||||
```
|
||||
|
||||
> output
|
||||
Note that the pods are running inside the pre-defined Pod CIDR ranges. Your results may differ, as we have not specified which worker node each pod should run on.
|
||||
|
||||
```
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
coredns-699f8ddd77-94qv9 1/1 Running 0 20s
|
||||
coredns-699f8ddd77-gtcgb 1/1 Running 0 20s
|
||||
```
|
||||
|
||||
## Verification
|
||||
|
||||
Create a `busybox` deployment:
|
||||
|
||||
```
|
||||
kubectl run --generator=run-pod/v1 busybox --image=busybox:1.28 --command -- sleep 3600
|
||||
$ kubectl run --generator=run-pod/v1 busybox --image=busybox:1.28 --command -- sleep 3600
|
||||
|
||||
pod/busybox created
|
||||
```
|
||||
|
||||
List the pod created by the `busybox` deployment:
|
||||
|
||||
```
|
||||
kubectl get pods -l run=busybox
|
||||
```
|
||||
$ kubectl get pods -l run=busybox -o wide
|
||||
|
||||
> output
|
||||
|
||||
```
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
busybox 1/1 Running 0 3s
|
||||
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
|
||||
busybox 1/1 Running 0 3m45s 10.200.2.2 worker-2 <none> <none>
|
||||
```
|
||||
|
||||
Retrieve the full name of the `busybox` pod:
|
||||
|
||||
```
|
||||
POD_NAME=$(kubectl get pods -l run=busybox -o jsonpath="{.items[0].metadata.name}")
|
||||
$ POD_NAME=$(kubectl get pods -l run=busybox -o jsonpath="{.items[0].metadata.name}")
|
||||
```
|
||||
|
||||
Execute a DNS lookup for the `kubernetes` service inside the `busybox` pod:
|
||||
|
||||
```
|
||||
kubectl exec -ti $POD_NAME -- nslookup kubernetes
|
||||
```
|
||||
$ kubectl exec -ti $POD_NAME -- nslookup kubernetes
|
||||
|
||||
> output
|
||||
|
||||
```
|
||||
Server: 10.32.0.10
|
||||
Address 1: 10.32.0.10 kube-dns.kube-system.svc.cluster.local
|
||||
|
||||
|
@ -78,4 +77,4 @@ Name: kubernetes
|
|||
Address 1: 10.32.0.1 kubernetes.default.svc.cluster.local
|
||||
```
|
||||
|
||||
Next: [Smoke Test](13-smoke-test.md)
|
||||
Next: [Smoke Test](13-smoke-test.md)
|
|
@ -9,40 +9,40 @@ In this section you will verify the ability to [encrypt secret data at rest](htt
|
|||
Create a generic secret:
|
||||
|
||||
```
|
||||
kubectl create secret generic kubernetes-the-hard-way \
|
||||
$ kubectl create secret generic kubernetes-the-hard-way \
|
||||
--from-literal="mykey=mydata"
|
||||
```
|
||||
|
||||
Print a hexdump of the `kubernetes-the-hard-way` secret stored in etcd:
|
||||
|
||||
```
|
||||
gcloud compute ssh controller-0 \
|
||||
--command "sudo ETCDCTL_API=3 etcdctl get \
|
||||
$ ssh <<master-0>>
|
||||
```
|
||||
|
||||
```
|
||||
master-0 $ sudo ETCDCTL_API=3 etcdctl get \
|
||||
--endpoints=https://127.0.0.1:2379 \
|
||||
--cacert=/etc/etcd/ca.pem \
|
||||
--cert=/etc/etcd/kubernetes.pem \
|
||||
--key=/etc/etcd/kubernetes-key.pem\
|
||||
/registry/secrets/default/kubernetes-the-hard-way | hexdump -C"
|
||||
```
|
||||
/registry/secrets/default/kubernetes-the-hard-way | hexdump -C
|
||||
|
||||
> output
|
||||
|
||||
```
|
||||
00000000 2f 72 65 67 69 73 74 72 79 2f 73 65 63 72 65 74 |/registry/secret|
|
||||
00000010 73 2f 64 65 66 61 75 6c 74 2f 6b 75 62 65 72 6e |s/default/kubern|
|
||||
00000020 65 74 65 73 2d 74 68 65 2d 68 61 72 64 2d 77 61 |etes-the-hard-wa|
|
||||
00000030 79 0a 6b 38 73 3a 65 6e 63 3a 61 65 73 63 62 63 |y.k8s:enc:aescbc|
|
||||
00000040 3a 76 31 3a 6b 65 79 31 3a 44 ac 6e ac 11 2f 28 |:v1:key1:D.n../(|
|
||||
00000050 02 46 3d ad 9d cd 68 be e4 cc 63 ae 13 e4 99 e8 |.F=...h...c.....|
|
||||
00000060 6e 55 a0 fd 9d 33 7a b1 17 6b 20 19 23 dc 3e 67 |nU...3z..k .#.>g|
|
||||
00000070 c9 6c 47 fa 78 8b 4d 28 cd d1 71 25 e9 29 ec 88 |.lG.x.M(..q%.)..|
|
||||
00000080 7f c9 76 b6 31 63 6e ea ac c5 e4 2f 32 d7 a6 94 |..v.1cn..../2...|
|
||||
00000090 3c 3d 97 29 40 5a ee e1 ef d6 b2 17 01 75 a4 a3 |<=.)@Z.......u..|
|
||||
000000a0 e2 c2 70 5b 77 1a 0b ec 71 c3 87 7a 1f 68 73 03 |..p[w...q..z.hs.|
|
||||
000000b0 67 70 5e ba 5e 65 ff 6f 0c 40 5a f9 2a bd d6 0e |gp^.^e.o.@Z.*...|
|
||||
000000c0 44 8d 62 21 1a 30 4f 43 b8 03 69 52 c0 b7 2e 16 |D.b!.0OC..iR....|
|
||||
000000d0 14 a5 91 21 29 fa 6e 03 47 e2 06 25 45 7c 4f 8f |...!).n.G..%E|O.|
|
||||
000000e0 6e bb 9d 3b e9 e5 2d 9e 3e 0a |n..;..-.>.|
|
||||
00000040 3a 76 31 3a 6b 65 79 31 3a 24 a3 f7 aa 22 b1 d2 |:v1:key1:$..."..|
|
||||
00000050 7b 9f 89 aa 53 a6 a0 5e e4 5f 1f ea b2 d6 c4 de |{...S..^._......|
|
||||
00000060 c2 80 02 a9 57 e7 e6 b0 46 57 9f fa c8 dd 89 c3 |....W...FW......|
|
||||
00000070 ef 15 58 71 ab ec c3 6a 9f 7e da b9 d8 94 2e 0d |..Xq...j.~......|
|
||||
00000080 85 a3 ff 94 56 62 a1 dd f6 4b a6 47 d1 46 b6 92 |....Vb...K.G.F..|
|
||||
00000090 27 9f 4d e0 5c 81 4e b4 fe 2e ca d5 5b d2 be 07 |'.M.\.N.....[...|
|
||||
000000a0 1d 4e 38 b8 2b 03 37 0d 65 84 e2 8c de 87 80 c8 |.N8.+.7.e.......|
|
||||
000000b0 9c f9 08 0e 4f 29 fc 5f b3 e8 10 99 b4 00 b3 ad |....O)._........|
|
||||
000000c0 6c dd 81 28 a0 2d a6 82 41 0e 7d ba a8 a0 7d d6 |l..(.-..A.}...}.|
|
||||
000000d0 15 f0 80 a5 1d 27 33 aa a1 b5 e0 d1 e7 5b 63 22 |.....'3......[c"|
|
||||
000000e0 9a 10 68 42 e6 d4 9f 0d ab 0a |..hB......|
|
||||
000000ea
|
||||
```
|
||||
|
||||
The etcd key should be prefixed with `k8s:enc:aescbc:v1:key1`, which indicates the `aescbc` provider was used to encrypt the data with the `key1` encryption key.
|
||||
|
@ -54,18 +54,14 @@ In this section you will verify the ability to create and manage [Deployments](h
|
|||
Create a deployment for the [nginx](https://nginx.org/en/) web server:
|
||||
|
||||
```
|
||||
kubectl create deployment nginx --image=nginx
|
||||
$ kubectl create deployment nginx --image=nginx
|
||||
```
|
||||
|
||||
List the pod created by the `nginx` deployment:
|
||||
|
||||
```
|
||||
kubectl get pods -l app=nginx
|
||||
```
|
||||
$ kubectl get pods -l app=nginx
|
||||
|
||||
> output
|
||||
|
||||
```
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
nginx-554b9c67f9-vt5rn 1/1 Running 0 10s
|
||||
```
|
||||
|
@ -77,18 +73,14 @@ In this section you will verify the ability to access applications remotely usin
|
|||
Retrieve the full name of the `nginx` pod:
|
||||
|
||||
```
|
||||
POD_NAME=$(kubectl get pods -l app=nginx -o jsonpath="{.items[0].metadata.name}")
|
||||
$ POD_NAME=$(kubectl get pods -l app=nginx -o jsonpath="{.items[0].metadata.name}")
|
||||
```
|
||||
|
||||
Forward port `8080` on your local machine to port `80` of the `nginx` pod:
|
||||
|
||||
```
|
||||
kubectl port-forward $POD_NAME 8080:80
|
||||
```
|
||||
$ kubectl port-forward $POD_NAME 8080:80
|
||||
|
||||
> output
|
||||
|
||||
```
|
||||
Forwarding from 127.0.0.1:8080 -> 80
|
||||
Forwarding from [::1]:8080 -> 80
|
||||
```
|
||||
|
@ -96,20 +88,16 @@ Forwarding from [::1]:8080 -> 80
|
|||
In a new terminal make an HTTP request using the forwarding address:
|
||||
|
||||
```
|
||||
curl --head http://127.0.0.1:8080
|
||||
```
|
||||
$ curl --head http://127.0.0.1:8080
|
||||
|
||||
> output
|
||||
|
||||
```
|
||||
HTTP/1.1 200 OK
|
||||
Server: nginx/1.17.3
|
||||
Date: Sat, 14 Sep 2019 21:10:11 GMT
|
||||
Server: nginx/1.17.8
|
||||
Date: Fri, 24 Jan 2020 19:31:41 GMT
|
||||
Content-Type: text/html
|
||||
Content-Length: 612
|
||||
Last-Modified: Tue, 13 Aug 2019 08:50:00 GMT
|
||||
Last-Modified: Tue, 21 Jan 2020 13:36:08 GMT
|
||||
Connection: keep-alive
|
||||
ETag: "5d5279b8-264"
|
||||
ETag: "5e26fe48-264"
|
||||
Accept-Ranges: bytes
|
||||
```
|
||||
|
||||
|
@ -129,12 +117,8 @@ In this section you will verify the ability to [retrieve container logs](https:/
|
|||
Print the `nginx` pod logs:
|
||||
|
||||
```
|
||||
kubectl logs $POD_NAME
|
||||
```
|
||||
$ kubectl logs $POD_NAME
|
||||
|
||||
> output
|
||||
|
||||
```
|
||||
127.0.0.1 - - [14/Sep/2019:21:10:11 +0000] "HEAD / HTTP/1.1" 200 0 "-" "curl/7.52.1" "-"
|
||||
```
|
||||
|
||||
|
@ -145,13 +129,9 @@ In this section you will verify the ability to [execute commands in a container]
|
|||
Print the nginx version by executing the `nginx -v` command in the `nginx` container:
|
||||
|
||||
```
|
||||
kubectl exec -ti $POD_NAME -- nginx -v
|
||||
```
|
||||
$ kubectl exec -ti $POD_NAME -- nginx -v
|
||||
|
||||
> output
|
||||
|
||||
```
|
||||
nginx version: nginx/1.17.3
|
||||
nginx version: nginx/1.17.8
|
||||
```
|
||||
|
||||
## Services
|
||||
|
@ -161,7 +141,7 @@ In this section you will verify the ability to expose applications using a [Serv
|
|||
Expose the `nginx` deployment using a [NodePort](https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport) service:
|
||||
|
||||
```
|
||||
kubectl expose deployment nginx --port 80 --type NodePort
|
||||
$ kubectl expose deployment nginx --port 80 --type NodePort
|
||||
```
|
||||
|
||||
> The LoadBalancer service type can not be used because your cluster is not configured with [cloud provider integration](https://kubernetes.io/docs/getting-started-guides/scratch/#cloud-provider). Setting up cloud provider integration is out of scope for this tutorial.
|
||||
|
@ -169,43 +149,77 @@ kubectl expose deployment nginx --port 80 --type NodePort
|
|||
Retrieve the node port assigned to the `nginx` service:
|
||||
|
||||
```
|
||||
NODE_PORT=$(kubectl get svc nginx \
|
||||
$ NODE_PORT=$(kubectl get svc nginx \
|
||||
--output=jsonpath='{range .spec.ports[0]}{.nodePort}')
|
||||
$ echo $NODE_PORT
|
||||
30712
|
||||
```
|
||||
|
||||
Create a firewall rule that allows remote access to the `nginx` node port:
|
||||
The value of `$NODE_PORT` varies. Create a security group ingress rule that allows remote access to the `nginx` node port, using the following CloudFormation template:
|
||||
|
||||
```
|
||||
gcloud compute firewall-rules create kubernetes-the-hard-way-allow-nginx-service \
|
||||
--allow=tcp:${NODE_PORT} \
|
||||
--network kubernetes-the-hard-way
|
||||
Reference: [cloudformation/hard-k8s-nodeport-sg-ingress.cfn.yml](../cloudformation/hard-k8s-nodeport-sg-ingress.cfn.yml)
|
||||
```yaml
|
||||
Parameters:
|
||||
ParamNodePort:
|
||||
Type: Number
|
||||
# ref: https://kubernetes.io/docs/concepts/services-networking/service/#nodeport
|
||||
MinValue: 30000
|
||||
MaxValue: 32767
|
||||
|
||||
Resources:
|
||||
HardK8sSmokeIngress:
|
||||
Type: AWS::EC2::SecurityGroupIngress
|
||||
Properties:
|
||||
GroupId: !ImportValue hard-k8s-sg
|
||||
CidrIp: 0.0.0.0/0
|
||||
IpProtocol: tcp
|
||||
FromPort: !Ref ParamNodePort
|
||||
ToPort: !Ref ParamNodePort
|
||||
```
|
||||
|
||||
Retrieve the external IP address of a worker instance:
|
||||
Pass the `$NODE_PORT` environment variable as a CloudFormation stack parameter:
|
||||
|
||||
```
|
||||
EXTERNAL_IP=$(gcloud compute instances describe worker-0 \
|
||||
--format 'value(networkInterfaces[0].accessConfigs[0].natIP)')
|
||||
$ aws cloudformation create-stack \
|
||||
--stack-name hard-k8s-nodeport-sg-ingress \
|
||||
--parameters ParameterKey=ParamNodePort,ParameterValue=$NODE_PORT \
|
||||
--template-body file://cloudformation/hard-k8s-nodeport-sg-ingress.cfn.yml
|
||||
```
|
||||
|
||||
Retrieve the external IP address of the worker instance that is hosting the `nginx` pod:
|
||||
|
||||
```
|
||||
$ kubectl get pods -l app=nginx -o wide
|
||||
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
|
||||
nginx-554b9c67f9-gw87z 1/1 Running 0 27m 10.200.1.3 worker-1 <none> <none>
|
||||
|
||||
$ WORKER_NODE_NAME=$(kubectl get pods -l app=nginx -o=jsonpath='{.items[0].spec.nodeName}')
|
||||
$ echo $WORKER_NODE_NAME
|
||||
worker-1
|
||||
|
||||
$ EXTERNAL_IP=$(aws ec2 describe-instances \
|
||||
--filter "Name=tag:Name,Values=${WORKER_NODE_NAME}" \
|
||||
--query 'Reservations[0].Instances[0].PublicIpAddress' --output text)
|
||||
$ echo $EXTERNAL_IP
|
||||
54.xxx.xxx.18
|
||||
```
|
||||
|
||||
Make an HTTP request using the external IP address and the `nginx` node port:
|
||||
|
||||
```
|
||||
curl -I http://${EXTERNAL_IP}:${NODE_PORT}
|
||||
```
|
||||
$ curl -I http://${EXTERNAL_IP}:${NODE_PORT}
|
||||
|
||||
> output
|
||||
|
||||
```
|
||||
HTTP/1.1 200 OK
|
||||
Server: nginx/1.17.3
|
||||
Date: Sat, 14 Sep 2019 21:12:35 GMT
|
||||
Server: nginx/1.17.8
|
||||
Date: Fri, 24 Jan 2020 20:02:27 GMT
|
||||
Content-Type: text/html
|
||||
Content-Length: 612
|
||||
Last-Modified: Tue, 13 Aug 2019 08:50:00 GMT
|
||||
Last-Modified: Tue, 21 Jan 2020 13:36:08 GMT
|
||||
Connection: keep-alive
|
||||
ETag: "5d5279b8-264"
|
||||
ETag: "5e26fe48-264"
|
||||
Accept-Ranges: bytes
|
||||
```
|
||||
|
||||
Next: [Cleaning Up](14-cleanup.md)
|
||||
Congrats! You have now built your own Kubernetes cluster the hard way.
|
||||
|
||||
Next: [Cleaning Up](14-cleanup.md)
|
|
@ -1,56 +1,46 @@
|
|||
# Cleaning Up
|
||||
|
||||
In this lab you will delete the compute resources created during this tutorial.
|
||||
In this lab you will delete the AWS resources created during this tutorial.
|
||||
|
||||
## Compute Instances
|
||||
## Delete CloudFormation stacks
|
||||
|
||||
Delete the controller and worker compute instances:
|
||||
As you created all resources via CloudFormation stacks, all you have to do is delete those stacks. Deleting them removes the underlying resources such as the EC2 instances (masters and workers), security groups, the NLB, and the EIP.
|
||||
|
||||
One thing to be aware of is the dependencies between stacks: if a stack exports values that other stacks consume via `!ImportValue`, the importing stacks must be deleted first.
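If you are unsure which stacks still consume a given export, `list-imports` will tell you before you attempt a deletion. A minimal example using the `hard-k8s-sg` export (the other exports work the same way):

```
$ aws cloudformation list-imports --export-name hard-k8s-sg
```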
|
||||
|
||||
```
|
||||
gcloud -q compute instances delete \
|
||||
controller-0 controller-1 controller-2 \
|
||||
worker-0 worker-1 worker-2 \
|
||||
--zone $(gcloud config get-value compute/zone)
|
||||
$ for stack in hard-k8s-nodeport-sg-ingress \
|
||||
hard-k8s-pod-routes \
|
||||
hard-k8s-nlb \
|
||||
hard-k8s-worker-nodes \
|
||||
hard-k8s-master-nodes; \
|
||||
do \
|
||||
aws cloudformation delete-stack --stack-name ${stack} && \
|
||||
aws cloudformation wait stack-delete-complete --stack-name ${stack}
|
||||
done
|
||||
```
|
||||
|
||||
## Networking
|
||||
|
||||
Delete the external load balancer network resources:
|
||||
Next, release the Elastic IP (EIP) that was used as the frontend of the Kubernetes API server. After that you can remove the CloudFormation stack with the `--retain-resources` option, which in this case doesn't so much "retain" the EIP as skip its deletion.
|
||||
|
||||
```
|
||||
{
|
||||
gcloud -q compute forwarding-rules delete kubernetes-forwarding-rule \
|
||||
--region $(gcloud config get-value compute/region)
|
||||
$ ALLOCATION_ID=$(aws ec2 describe-addresses \
|
||||
--filters "Name=tag:Name,Values=eip-kubernetes-the-hard-way" \
|
||||
--query 'Addresses[0].AllocationId' --output text)
|
||||
|
||||
gcloud -q compute target-pools delete kubernetes-target-pool
|
||||
$ aws ec2 release-address --allocation-id $ALLOCATION_ID
|
||||
|
||||
gcloud -q compute http-health-checks delete kubernetes
|
||||
|
||||
gcloud -q compute addresses delete kubernetes-the-hard-way
|
||||
}
|
||||
$ aws cloudformation delete-stack --stack-name hard-k8s-eip --retain-resources HardK8sEIP
|
||||
```
|
||||
|
||||
Delete the `kubernetes-the-hard-way` firewall rules:
|
||||
Now you can delete the rest of the stacks.
|
||||
|
||||
```
|
||||
gcloud -q compute firewall-rules delete \
|
||||
kubernetes-the-hard-way-allow-nginx-service \
|
||||
kubernetes-the-hard-way-allow-internal \
|
||||
kubernetes-the-hard-way-allow-external \
|
||||
kubernetes-the-hard-way-allow-health-check
|
||||
$ for stack in hard-k8s-security-groups \
|
||||
hard-k8s-network; \
|
||||
do \
|
||||
aws cloudformation delete-stack --stack-name ${stack} && \
|
||||
aws cloudformation wait stack-delete-complete --stack-name ${stack}
|
||||
done
|
||||
```
|
||||
|
||||
Delete the `kubernetes-the-hard-way` network VPC:
|
||||
|
||||
```
|
||||
{
|
||||
gcloud -q compute routes delete \
|
||||
kubernetes-route-10-200-0-0-24 \
|
||||
kubernetes-route-10-200-1-0-24 \
|
||||
kubernetes-route-10-200-2-0-24
|
||||
|
||||
gcloud -q compute networks subnets delete kubernetes
|
||||
|
||||
gcloud -q compute networks delete kubernetes-the-hard-way
|
||||
}
|
||||
```
|
||||
I hope you've enjoyed this tutorial. If you find any problems or have suggestions, please [open an issue](https://github.com/thash/kubernetes-the-hard-way-on-aws/issues).
|
|
@ -0,0 +1,12 @@
|
|||
#!/bin/bash
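# List the Name tag, instance ID, AZ, private/public IP and state of all EC2
# instances in the VPC created by the hard-k8s-network stack.
# Note: REGION below is hard-coded; adjust it to the region where your stacks live.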
|
||||
|
||||
REGION='us-west-2'
|
||||
|
||||
VPC_ID=$(aws --region $REGION cloudformation describe-stacks \
|
||||
--stack-name hard-k8s-network \
|
||||
--query 'Stacks[0].Outputs[?ExportName==`hard-k8s-vpc`].OutputValue' --output text)
|
||||
|
||||
aws --region $REGION ec2 describe-instances \
|
||||
--filters Name=vpc-id,Values=$VPC_ID \
|
||||
--query 'Reservations[].Instances[].[Tags[?Key==`Name`].Value | [0],InstanceId,Placement.AvailabilityZone,PrivateIpAddress,PublicIpAddress,State.Name]' \
|
||||
--output text | sort
|