{
wget -q --show-progress --https-only --timestamping \
https://storage.googleapis.com/kubernetes-the-hard-way/cfssl/1.4.1/linux/cfssl \
https://storage.googleapis.com/kubernetes-the-hard-way/cfssl/1.4.1/linux/cfssljson
chmod +x cfssl cfssljson
sudo mv cfssl cfssljson /usr/local/bin/
}
{
cat > ca-config.json <<EOF
{
  "signing": {
    "default": {
      "expiry": "8760h"
    },
    "profiles": {
      "kubernetes": {
        "usages": ["signing", "key encipherment", "server auth", "client auth"],
        "expiry": "8760h"
      }
    }
  }
}
EOF
cat > ca-csr.json <<EOF
{
  "CN": "Kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "US",
      "L": "Portland",
      "O": "Kubernetes",
      "OU": "CA",
      "ST": "Oregon"
    }
  ]
}
EOF
cfssl gencert -initca ca-csr.json | cfssljson -bare ca
}
Result:
ca-key.pem
ca.csr
ca.pem
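A quick sanity check on the freshly generated CA (this assumes openssl is installed on the box) - the certificate should be self-signed, with CN=Kubernetes and CA:TRUE:
# inspect subject, issuer and validity of the new CA
openssl x509 -in ca.pem -noout -subject -issuer -dates
# confirm it is actually marked as a CA
openssl x509 -in ca.pem -noout -text | grep -A1 'Basic Constraints'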
{
KUBERNETES_HOSTNAMES=kubernetes,kubernetes.default,kubernetes.default.svc,kubernetes.default.svc.cluster,kubernetes.svc.cluster.local
cat > kubernetes-csr.json <<EOF
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "US",
      "L": "Portland",
      "O": "Kubernetes",
      "OU": "Kubernetes The Hard Way",
      "ST": "Oregon"
    }
  ]
}
EOF
cfssl gencert \
-ca=ca.pem \
-ca-key=ca-key.pem \
-config=ca-config.json \
-hostname=worker,127.0.0.1,${KUBERNETES_HOSTNAMES} \
-profile=kubernetes \
kubernetes-csr.json | cfssljson -bare kubernetes
}
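It is worth checking that all the expected names made it into the server certificate's SANs (again assuming openssl is available); worker, 127.0.0.1 and the kubernetes.* service names should all be listed:
# list the Subject Alternative Names baked into the API server / etcd certificate
openssl x509 -in kubernetes.pem -noout -text | grep -A1 'Subject Alternative Name'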
Download etcd
wget -q --show-progress --https-only --timestamping \
"https://github.com/etcd-io/etcd/releases/download/v3.4.15/etcd-v3.4.15-linux-amd64.tar.gz"
Extract etcd and move it into the /usr/local/bin/ directory
{
tar -xvf etcd-v3.4.15-linux-amd64.tar.gz
sudo mv etcd-v3.4.15-linux-amd64/etcd* /usr/local/bin/
}
{
sudo mkdir -p /etc/etcd /var/lib/etcd
sudo chmod 700 /var/lib/etcd
sudo cp ca.pem \
kubernetes.pem kubernetes-key.pem \
/etc/etcd/
}
cat <<EOF | sudo tee /etc/systemd/system/etcd.service
[Unit]
Description=etcd
Documentation=https://github.com/coreos
[Service]
Type=notify
ExecStart=/usr/local/bin/etcd \\
--name etcd \\
--cert-file=/etc/etcd/kubernetes.pem \\
--key-file=/etc/etcd/kubernetes-key.pem \\
--trusted-ca-file=/etc/etcd/ca.pem \\
--client-cert-auth \\
--listen-client-urls https://127.0.0.1:2379 \\
--advertise-client-urls https://127.0.0.1:2379 \\
--data-dir=/var/lib/etcd
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
{
sudo systemctl daemon-reload
sudo systemctl enable etcd
sudo systemctl start etcd
}
sudo ETCDCTL_API=3 etcdctl member list \
--endpoints=https://127.0.0.1:2379 \
--cacert=/etc/etcd/ca.pem \
--cert=/etc/etcd/kubernetes.pem \
--key=/etc/etcd/kubernetes-key.pem
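Besides the member list, etcdctl can also report endpoint health over the same TLS client certificate - a handy check that the certificate/key pair really works end to end:
sudo ETCDCTL_API=3 etcdctl endpoint health \
  --endpoints=https://127.0.0.1:2379 \
  --cacert=/etc/etcd/ca.pem \
  --cert=/etc/etcd/kubernetes.pem \
  --key=/etc/etcd/kubernetes-key.pem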
api server
{
cat > service-account-csr.json <<EOF
{
  "CN": "service-accounts",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "US",
      "L": "Portland",
      "O": "Kubernetes",
      "OU": "Kubernetes The Hard Way",
      "ST": "Oregon"
    }
  ]
}
EOF
cfssl gencert \
-ca=ca.pem \
-ca-key=ca-key.pem \
-config=ca-config.json \
-profile=kubernetes \
service-account-csr.json | cfssljson -bare service-account
}
{
cat > admin-csr.json <<EOF
{
  "CN": "admin",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "US",
      "L": "Portland",
      "O": "system:masters",
      "OU": "Kubernetes The Hard Way",
      "ST": "Oregon"
    }
  ]
}
EOF
cfssl gencert \
-ca=ca.pem \
-ca-key=ca-key.pem \
-config=ca-config.json \
-profile=kubernetes \
admin-csr.json | cfssljson -bare admin
}
ENCRYPTION_KEY=$(head -c 32 /dev/urandom | base64)
cat > encryption-config.yaml <<EOF
kind: EncryptionConfig
apiVersion: v1
resources:
  - resources:
      - secrets
    providers:
      - aescbc:
          keys:
            - name: key1
              secret: ${ENCRYPTION_KEY}
      - identity: {}
EOF
sudo mkdir -p /etc/kubernetes/config
wget -q --show-progress --https-only --timestamping \
"https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/amd64/kube-apiserver"
{
chmod +x kube-apiserver
sudo mv kube-apiserver /usr/local/bin/
}
{
sudo mkdir -p /var/lib/kubernetes/
sudo cp \
ca.pem \
kubernetes.pem kubernetes-key.pem \
encryption-config.yaml \
service-account-key.pem service-account.pem \
/var/lib/kubernetes/
}
cat <<EOF | sudo tee /etc/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
[Service]
ExecStart=/usr/local/bin/kube-apiserver \\
--advertise-address='91.107.220.4' \\
--allow-privileged='true' \\
--apiserver-count='3' \\
--audit-log-maxage='30' \\
--audit-log-maxbackup='3' \\
--audit-log-maxsize='100' \\
--audit-log-path='/var/log/audit.log' \\
--authorization-mode='Node,RBAC' \\
--bind-address='0.0.0.0' \\
--client-ca-file='/var/lib/kubernetes/ca.pem' \\
--enable-admission-plugins='NamespaceLifecycle,NodeRestriction,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota' \\
--etcd-cafile='/var/lib/kubernetes/ca.pem' \\
--etcd-certfile='/var/lib/kubernetes/kubernetes.pem' \\
--etcd-keyfile='/var/lib/kubernetes/kubernetes-key.pem' \\
--etcd-servers='https://127.0.0.1:2379' \\
--event-ttl='1h' \\
--encryption-provider-config='/var/lib/kubernetes/encryption-config.yaml' \\
--kubelet-certificate-authority='/var/lib/kubernetes/ca.pem' \\
--kubelet-client-certificate='/var/lib/kubernetes/kubernetes.pem' \\
--kubelet-client-key='/var/lib/kubernetes/kubernetes-key.pem' \\
--runtime-config='api/all=true' \\
--service-account-key-file='/var/lib/kubernetes/service-account.pem' \\
--service-cluster-ip-range='10.32.0.0/24' \\
--service-node-port-range='30000-32767' \\
--tls-cert-file='/var/lib/kubernetes/kubernetes.pem' \\
--tls-private-key-file='/var/lib/kubernetes/kubernetes-key.pem' \\
--service-account-signing-key-file='/var/lib/kubernetes/service-account-key.pem' \\
--service-account-issuer='https://kubernetes.default.svc.cluster.local' \\
--api-audiences='https://kubernetes.default.svc.cluster.local' \\
--v='2'
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
{
sudo systemctl daemon-reload
sudo systemctl enable kube-apiserver
sudo systemctl start kube-apiserver
}
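Before moving on it is worth confirming the API server actually came up. The unit status plus an anonymous request to /version (which the default system:public-info-viewer RBAC binding allows without credentials) should be enough:
sudo systemctl status kube-apiserver --no-pager
# expect a JSON blob with gitVersion v1.21.0
curl --cacert ca.pem https://127.0.0.1:6443/version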
wget -q --show-progress --https-only --timestamping \
https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/amd64/kubectl \
&& chmod +x kubectl \
&& sudo mv kubectl /usr/local/bin/
{
kubectl config set-cluster kubernetes-the-hard-way \
--certificate-authority=ca.pem \
--embed-certs=true \
--server=https://127.0.0.1:6443 \
--kubeconfig=admin.kubeconfig
kubectl config set-credentials admin \
--client-certificate=admin.pem \
--client-key=admin-key.pem \
--embed-certs=true \
--kubeconfig=admin.kubeconfig
kubectl config set-context default \
--cluster=kubernetes-the-hard-way \
--user=admin \
--kubeconfig=admin.kubeconfig
kubectl config use-context default --kubeconfig=admin.kubeconfig
}
kubectl version --kubeconfig=admin.kubeconfig
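At this point only etcd and the API server are running, which is easy to see from componentstatuses (deprecated, but still present in v1.21) - the scheduler and controller-manager will show up as unreachable until we get to them later:
kubectl get componentstatuses --kubeconfig=admin.kubeconfig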
{
cat <<EOF> pod.yaml
apiVersion: v1
kind: Pod
metadata:
  name: hello-world
spec:
  serviceAccountName: hello-world
  containers:
  - name: hello-world-container
    image: busybox
    command: ['sh', '-c', 'while true; do echo "Hello, World!"; sleep 1; done']
  nodeName: worker
EOF
cat <<EOF> sa.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: hello-world
automountServiceAccountToken: false
EOF
kubectl apply -f sa.yaml --kubeconfig=admin.kubeconfig
kubectl apply -f pod.yaml --kubeconfig=admin.kubeconfig
}
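Since there is no kubelet yet, the hello-world Pod can only be recorded in etcd - it should sit in Pending with no running container until the worker node joins in the next section:
kubectl get pod hello-world --kubeconfig=admin.kubeconfig -o wide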
kubelet
Hmm, we probably also need to issue certificates for the public IP address.
echo "127.0.0.1 worker" | sudo tee -a /etc/hosts
{
cat > kubelet-csr.json <<EOF
{
  "CN": "system:node:worker",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "US",
      "L": "Portland",
      "O": "system:nodes",
      "OU": "Kubernetes The Hard Way",
      "ST": "Oregon"
    }
  ]
}
EOF
cfssl gencert \
-ca=ca.pem \
-ca-key=ca-key.pem \
-config=ca-config.json \
-hostname=127.0.0.1 \
-profile=kubernetes \
kubelet-csr.json | cfssljson -bare kubelet
}
{
sudo apt-get update
sudo apt-get -y install socat conntrack ipset
}
sudo swapon --show
sudo swapoff -a
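swapoff -a only lasts until the next reboot; if the machine has a swap line in /etc/fstab you may also want to comment it out (optional, and the sed pattern assumes a typical fstab layout):
# keep swap disabled across reboots by commenting out any swap entries
sudo sed -i '/\sswap\s/ s/^#*/#/' /etc/fstab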
wget -q --show-progress --https-only --timestamping \
https://github.com/opencontainers/runc/releases/download/v1.0.0-rc93/runc.amd64 \
https://github.com/containernetworking/plugins/releases/download/v0.9.1/cni-plugins-linux-amd64-v0.9.1.tgz \
https://github.com/containerd/containerd/releases/download/v1.4.4/containerd-1.4.4-linux-amd64.tar.gz \
https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/amd64/kube-proxy \
https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/amd64/kubelet
sudo mkdir -p \
/etc/cni/net.d \
/opt/cni/bin \
/var/lib/kubelet \
/var/lib/kube-proxy \
/var/lib/kubernetes \
/var/run/kubernetes
{
mkdir containerd
tar -xvf containerd-1.4.4-linux-amd64.tar.gz -C containerd
sudo tar -xvf cni-plugins-linux-amd64-v0.9.1.tgz -C /opt/cni/bin/
sudo mv runc.amd64 runc
chmod +x kube-proxy kubelet runc
sudo mv kube-proxy kubelet runc /usr/local/bin/
sudo mv containerd/bin/* /bin/
}
cat <<EOF | sudo tee /etc/cni/net.d/10-bridge.conf
{
  "cniVersion": "0.4.0",
  "name": "bridge",
  "type": "bridge",
  "bridge": "cnio0",
  "isGateway": true,
  "ipMasq": true,
  "ipam": {
    "type": "host-local",
    "ranges": [
      [{"subnet": "10.240.1.0/24"}]
    ],
    "routes": [{"dst": "0.0.0.0/0"}]
  }
}
EOF
cat <<EOF | sudo tee /etc/cni/net.d/99-loopback.conf
{
  "cniVersion": "0.4.0",
  "name": "lo",
  "type": "loopback"
}
EOF
sudo mkdir -p /etc/containerd/
cat << EOF | sudo tee /etc/containerd/config.toml
[plugins]
  [plugins.cri.containerd]
    snapshotter = "overlayfs"
    [plugins.cri.containerd.default_runtime]
      runtime_type = "io.containerd.runtime.v1.linux"
      runtime_engine = "/usr/local/bin/runc"
      runtime_root = ""
EOF
cat <<EOF | sudo tee /etc/systemd/system/containerd.service
[Unit]
Description=containerd container runtime
Documentation=https://containerd.io
After=network.target
[Service]
ExecStartPre=/sbin/modprobe overlay
ExecStart=/bin/containerd
Restart=always
RestartSec=5
Delegate=yes
KillMode=process
OOMScoreAdjust=-999
LimitNOFILE=1048576
LimitNPROC=infinity
LimitCORE=infinity
[Install]
WantedBy=multi-user.target
EOF
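Nothing starts containerd explicitly here - the kubelet unit below pulls it in through its Requires= line - but it does not hurt to bring it up on its own first and make sure the runtime is healthy:
sudo systemctl daemon-reload
sudo systemctl enable --now containerd
sudo systemctl status containerd --no-pager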
{
kubectl config set-cluster kubernetes-the-hard-way \
--certificate-authority=ca.pem \
--embed-certs=true \
--server=https://127.0.0.1:6443 \
--kubeconfig=kubelet.kubeconfig
kubectl config set-credentials system:node:worker \
--client-certificate=kubelet.pem \
--client-key=kubelet-key.pem \
--embed-certs=true \
--kubeconfig=kubelet.kubeconfig
kubectl config set-context default \
--cluster=kubernetes-the-hard-way \
--user=system:node:worker \
--kubeconfig=kubelet.kubeconfig
kubectl config use-context default --kubeconfig=kubelet.kubeconfig
}
{
sudo cp kubelet-key.pem kubelet.pem /var/lib/kubelet/
sudo cp kubelet.kubeconfig /var/lib/kubelet/kubeconfig
sudo cp ca.pem /var/lib/kubernetes/
}
cat <<EOF | sudo tee /var/lib/kubelet/kubelet-config.yaml
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
authentication:
  anonymous:
    enabled: false
  webhook:
    enabled: true
  x509:
    clientCAFile: "/var/lib/kubernetes/ca.pem"
authorization:
  mode: Webhook
clusterDomain: "cluster.local"
clusterDNS:
  - "10.32.0.10"
podCIDR: "10.240.1.0/24"
resolvConf: "/run/systemd/resolve/resolv.conf"
runtimeRequestTimeout: "15m"
tlsCertFile: "/var/lib/kubelet/kubelet.pem"
tlsPrivateKeyFile: "/var/lib/kubelet/kubelet-key.pem"
EOF
cat <<EOF | sudo tee /etc/systemd/system/kubelet.service
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/kubernetes/kubernetes
After=containerd.service
Requires=containerd.service
[Service]
ExecStart=/usr/local/bin/kubelet \\
--config=/var/lib/kubelet/kubelet-config.yaml \\
--container-runtime=remote \\
--container-runtime-endpoint=unix:///var/run/containerd/containerd.sock \\
--image-pull-progress-deadline=2m \\
--kubeconfig=/var/lib/kubelet/kubeconfig \\
--network-plugin=cni \\
--register-node=true \\
--hostname-override=worker \\
--v=2
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
{
sudo systemctl daemon-reload
sudo systemctl enable kubelet
sudo systemctl start kubelet
}
kubectl get nodes --kubeconfig=admin.kubeconfig
kubectl get pod --kubeconfig=admin.kubeconfig
cat <<EOF> nginx-pod.yaml
apiVersion: v1
kind: Pod
metadata:
  name: nginx-pod
spec:
  serviceAccountName: hello-world
  containers:
  - name: nginx-container
    image: nginx
    ports:
    - containerPort: 80
  nodeName: worker
EOF
kubectl apply -f nginx-pod.yaml --kubeconfig=admin.kubeconfig
kubectl get pod nginx-pod --kubeconfig=admin.kubeconfig -o=jsonpath='{.status.podIP}'
curl $(kubectl get pod nginx-pod --kubeconfig=admin.kubeconfig -o=jsonpath='{.status.podIP}')
kubectl delete -f nginx-pod.yaml --kubeconfig=admin.kubeconfig
kubectl delete -f pod.yaml --kubeconfig=admin.kubeconfig
kubectl delete -f sa.yaml --kubeconfig=admin.kubeconfig
cat <<EOF> nginx-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx-container
        image: nginx
        ports:
        - containerPort: 80
EOF
kubectl apply -f nginx-deployment.yaml --kubeconfig=admin.kubeconfig
kubectl get pod --kubeconfig=admin.kubeconfig
kubectl get deployment --kubeconfig=admin.kubeconfig
So the Deployment is there, but there are no Pods - that's not right.
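That is actually expected: a Deployment is just desired state, and it is the controller manager (the deployment and replicaset controllers inside it) that turns it into a ReplicaSet and Pods. A quick look confirms nothing has been created downstream yet:
# no controller manager -> no ReplicaSet -> no Pods
kubectl get replicaset --kubeconfig=admin.kubeconfig
kubectl describe deployment nginx-deployment --kubeconfig=admin.kubeconfig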
controller manager
{
cat > kube-controller-manager-csr.json <<EOF
{
  "CN": "system:kube-controller-manager",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "US",
      "L": "Portland",
      "O": "system:kube-controller-manager",
      "OU": "Kubernetes The Hard Way",
      "ST": "Oregon"
    }
  ]
}
EOF
cfssl gencert \
-ca=ca.pem \
-ca-key=ca-key.pem \
-config=ca-config.json \
-profile=kubernetes \
kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager
}
{
kubectl config set-cluster kubernetes-the-hard-way \
--certificate-authority=ca.pem \
--embed-certs=true \
--server=https://127.0.0.1:6443 \
--kubeconfig=kube-controller-manager.kubeconfig
kubectl config set-credentials system:kube-controller-manager \
--client-certificate=kube-controller-manager.pem \
--client-key=kube-controller-manager-key.pem \
--embed-certs=true \
--kubeconfig=kube-controller-manager.kubeconfig
kubectl config set-context default \
--cluster=kubernetes-the-hard-way \
--user=system:kube-controller-manager \
--kubeconfig=kube-controller-manager.kubeconfig
kubectl config use-context default --kubeconfig=kube-controller-manager.kubeconfig
}
wget -q --show-progress --https-only --timestamping \
"https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/amd64/kube-controller-manager"
{
chmod +x kube-controller-manager
sudo mv kube-controller-manager /usr/local/bin/
}
sudo mv kube-controller-manager.kubeconfig /var/lib/kubernetes/
sudo cp ca-key.pem /var/lib/kubernetes/
cat <<EOF | sudo tee /etc/systemd/system/kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
[Service]
ExecStart=/usr/local/bin/kube-controller-manager \\
--bind-address=0.0.0.0 \\
--cluster-cidr=10.200.0.0/16 \\
--cluster-name=kubernetes \\
--cluster-signing-cert-file=/var/lib/kubernetes/ca.pem \\
--cluster-signing-key-file=/var/lib/kubernetes/ca-key.pem \\
--kubeconfig=/var/lib/kubernetes/kube-controller-manager.kubeconfig \\
--leader-elect=true \\
--root-ca-file=/var/lib/kubernetes/ca.pem \\
--service-account-private-key-file=/var/lib/kubernetes/service-account-key.pem \\
--service-cluster-ip-range=10.32.0.0/24 \\
--use-service-account-credentials=true \\
--v=2
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
{
sudo systemctl daemon-reload
sudo systemctl enable kube-controller-manager
sudo systemctl start kube-controller-manager
}
kubectl get pod --kubeconfig=admin.kubeconfig
So we can see that our Pods have been created, but they are not starting at all.
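The Pods now exist but none of them has a node assigned - binding Pods to nodes is exactly the scheduler's job, which we have not installed yet. The empty NODE column makes that visible:
kubectl get pod -o wide --kubeconfig=admin.kubeconfig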
kube scheduler
{
cat > kube-scheduler-csr.json <<EOF
{
  "CN": "system:kube-scheduler",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "US",
      "L": "Portland",
      "O": "system:kube-scheduler",
      "OU": "Kubernetes The Hard Way",
      "ST": "Oregon"
    }
  ]
}
EOF
cfssl gencert \
-ca=ca.pem \
-ca-key=ca-key.pem \
-config=ca-config.json \
-profile=kubernetes \
kube-scheduler-csr.json | cfssljson -bare kube-scheduler
}
{
kubectl config set-cluster kubernetes-the-hard-way \
--certificate-authority=ca.pem \
--embed-certs=true \
--server=https://127.0.0.1:6443 \
--kubeconfig=kube-scheduler.kubeconfig
kubectl config set-credentials system:kube-scheduler \
--client-certificate=kube-scheduler.pem \
--client-key=kube-scheduler-key.pem \
--embed-certs=true \
--kubeconfig=kube-scheduler.kubeconfig
kubectl config set-context default \
--cluster=kubernetes-the-hard-way \
--user=system:kube-scheduler \
--kubeconfig=kube-scheduler.kubeconfig
kubectl config use-context default --kubeconfig=kube-scheduler.kubeconfig
}
wget -q --show-progress --https-only --timestamping \
"https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/amd64/kube-scheduler"
{
chmod +x kube-scheduler
sudo mv kube-scheduler /usr/local/bin/
}
sudo mv kube-scheduler.kubeconfig /var/lib/kubernetes/
cat <<EOF | sudo tee /etc/kubernetes/config/kube-scheduler.yaml
apiVersion: kubescheduler.config.k8s.io/v1beta1
kind: KubeSchedulerConfiguration
clientConnection:
  kubeconfig: "/var/lib/kubernetes/kube-scheduler.kubeconfig"
leaderElection:
  leaderElect: true
EOF
cat <<EOF | sudo tee /etc/systemd/system/kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
[Service]
ExecStart=/usr/local/bin/kube-scheduler \\
--config=/etc/kubernetes/config/kube-scheduler.yaml \\
--v=2
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
{
sudo systemctl daemon-reload
sudo systemctl enable kube-scheduler
sudo systemctl start kube-scheduler
}
kubectl get pod --kubeconfig=admin.kubeconfig
Finally we can see our Pods: they are running, and we can even check that they actually respond.
curl $(kubectl get pods -l app=nginx --kubeconfig=admin.kubeconfig -o=jsonpath='{.items[0].status.podIP}')
Nice - everything is up and serving.
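As a final check, the encryption-config from earlier can be verified too: create a throwaway Secret and read it straight out of etcd - the stored value should start with the k8s:enc:aescbc:v1:key1 prefix rather than plaintext:
kubectl create secret generic kubernetes-the-hard-way \
  --from-literal="mykey=mydata" --kubeconfig=admin.kubeconfig
# dump the raw etcd value; the aescbc prefix shows the Secret is encrypted at rest
sudo ETCDCTL_API=3 etcdctl get /registry/secrets/default/kubernetes-the-hard-way \
  --endpoints=https://127.0.0.1:2379 \
  --cacert=/etc/etcd/ca.pem \
  --cert=/etc/etcd/kubernetes.pem \
  --key=/etc/etcd/kubernetes-key.pem | hexdump -C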