finalize docker support

pull/863/head
Ruslan Savchuk 2025-04-16 22:07:20 +02:00
parent 39a5cc646d
commit 84a7bd6f1a
16 changed files with 89 additions and 1148 deletions

50
.gitignore vendored
View File

@ -1,50 +0,0 @@
admin-csr.json
admin-key.pem
admin.csr
admin.pem
admin.kubeconfig
ca-config.json
ca-csr.json
ca-key.pem
ca.csr
ca.pem
encryption-config.yaml
kube-controller-manager-csr.json
kube-controller-manager-key.pem
kube-controller-manager.csr
kube-controller-manager.kubeconfig
kube-controller-manager.pem
kube-scheduler-csr.json
kube-scheduler-key.pem
kube-scheduler.csr
kube-scheduler.kubeconfig
kube-scheduler.pem
kube-proxy-csr.json
kube-proxy-key.pem
kube-proxy.csr
kube-proxy.kubeconfig
kube-proxy.pem
kubernetes-csr.json
kubernetes-key.pem
kubernetes.csr
kubernetes.pem
worker-0-csr.json
worker-0-key.pem
worker-0.csr
worker-0.kubeconfig
worker-0.pem
worker-1-csr.json
worker-1-key.pem
worker-1.csr
worker-1.kubeconfig
worker-1.pem
worker-2-csr.json
worker-2-key.pem
worker-2.csr
worker-2.kubeconfig
worker-2.pem
service-account-key.pem
service-account.csr
service-account.pem
service-account-csr.json
*.swp

View File

@ -1,18 +0,0 @@
This project is made possible by contributors like YOU! While all contributions are welcomed, please be sure and follow the following suggestions to help your PR get merged.
## License
This project uses an [Apache license](LICENSE). Be sure you're comfortable with the implications of that before working up a patch.
## Review and merge process
Review and merge duties are managed by [@kelseyhightower](https://github.com/kelseyhightower). Expect some burden of proof for demonstrating the marginal value of adding new content to the tutorial.
Here are some examples of the review and justification process:
- [#208](https://github.com/kelseyhightower/kubernetes-the-hard-way/pull/208)
- [#282](https://github.com/kelseyhightower/kubernetes-the-hard-way/pull/282)
## Notes on minutiae
If you find a bug that breaks the guide, please do submit it. If you are considering a minor copy edit for tone, grammar, or simple inconsistent whitespace, consider the tradeoff between maintainer time and community benefit before investing too much of your time.

View File

@ -1,12 +0,0 @@
FROM ubuntu:22.04
RUN apt update \
&& apt install -y wget systemd kmod systemd-sysv vim less iptables \
&& rm -rf /var/lib/apt/lists/*
RUN systemctl set-default multi-user.target
RUN find /etc/systemd/system /lib/systemd/system \
-path '*.wants/*' -not -name '*systemd*' -exec rm -f {} \;
ENTRYPOINT ["/lib/systemd/systemd", "--system"]

View File

@ -4,7 +4,9 @@ This tutorial is partially based on [Kubernetes The Hard Way](https://github.com
The main focus of this tutorial is to explain the necessity of Kubernetes components. That is why there is no need to configure multiple instances of each component, which allows us to set up a single-node Kubernetes cluster. Of course, the cluster created can't be used as a production-ready Kubernetes cluster.
To configure the cluster mentioned, we will use Ubuntu server 20.04 (author uses the VM in Hetzner).
To run the labs you need one of the following:
- vm with ubuntu 20.04
- preconfigured docker container ([here is the manual how to do that](./docs/00-docker.md))
## Copyright
@ -22,7 +24,6 @@ To configure the cluster mentioned, we will use Ubuntu server 20.04 (author uses
* [Scheduler](./docs/07-scheduler.md)
* [Controller manager](./docs/08-controller-manager.md)
* [Kube-proxy](./docs/09-kubeproxy.md)
* [DNS in Kubernetes](./docs/10-dns.md)
@ -34,10 +35,22 @@ docker run -d \
--privileged \
--security-opt seccomp=unconfined \
--security-opt apparmor=unconfined \
--cap-add=NET_ADMIN \
--cap-add=NET_RAW \
-v /sys/fs/cgroup:/sys/fs/cgroup:rw \
-v /lib/modules:/lib/modules:ro \
--tmpfs /tmp \
--tmpfs /run \
--tmpfs /run/lock \
kindest/base:v20250312-b98dc21a \
/sbin/init
ubuntu-systemd
docker run -d \
--name ubuntu-systemd-container \
--privileged \
--security-opt seccomp=unconfined \
--security-opt apparmor=unconfined \
-v /sys/fs/cgroup:/sys/fs/cgroup:rw \
--tmpfs /tmp \
--tmpfs /run \
--tmpfs /run/lock \
ubuntu-systemd

View File

@ -1,180 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: coredns
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
kubernetes.io/bootstrapping: rbac-defaults
name: system:coredns
rules:
- apiGroups:
- ""
resources:
- endpoints
- services
- pods
- namespaces
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
annotations:
rbac.authorization.kubernetes.io/autoupdate: "true"
labels:
kubernetes.io/bootstrapping: rbac-defaults
name: system:coredns
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:coredns
subjects:
- kind: ServiceAccount
name: coredns
namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
name: coredns
namespace: kube-system
data:
Corefile: |
.:53 {
errors
health
ready
kubernetes cluster.local in-addr.arpa ip6.arpa {
pods insecure
fallthrough in-addr.arpa ip6.arpa
}
prometheus :9153
cache 30
loop
reload
loadbalance
}
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: coredns
namespace: kube-system
labels:
k8s-app: kube-dns
kubernetes.io/name: "CoreDNS"
spec:
replicas: 2
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
selector:
matchLabels:
k8s-app: kube-dns
template:
metadata:
labels:
k8s-app: kube-dns
spec:
priorityClassName: system-cluster-critical
serviceAccountName: coredns
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"
nodeSelector:
beta.kubernetes.io/os: linux
containers:
- name: coredns
image: coredns/coredns:1.7.0
imagePullPolicy: IfNotPresent
resources:
limits:
memory: 170Mi
requests:
cpu: 100m
memory: 70Mi
args: [ "-conf", "/etc/coredns/Corefile" ]
volumeMounts:
- name: config-volume
mountPath: /etc/coredns
readOnly: true
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
- containerPort: 9153
name: metrics
protocol: TCP
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- NET_BIND_SERVICE
drop:
- all
readOnlyRootFilesystem: true
livenessProbe:
httpGet:
path: /health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
readinessProbe:
httpGet:
path: /ready
port: 8181
scheme: HTTP
dnsPolicy: Default
volumes:
- name: config-volume
configMap:
name: coredns
items:
- key: Corefile
path: Corefile
---
apiVersion: v1
kind: Service
metadata:
name: kube-dns
namespace: kube-system
annotations:
prometheus.io/port: "9153"
prometheus.io/scrape: "true"
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "CoreDNS"
spec:
selector:
k8s-app: kube-dns
clusterIP: 10.32.0.10
ports:
- name: dns
port: 53
protocol: UDP
- name: dns-tcp
port: 53
protocol: TCP
- name: metrics
port: 9153
protocol: TCP

View File

@ -1,206 +0,0 @@
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: v1
kind: Service
metadata:
name: kube-dns
namespace: kube-system
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
kubernetes.io/name: "KubeDNS"
spec:
selector:
k8s-app: kube-dns
clusterIP: 10.32.0.10
ports:
- name: dns
port: 53
protocol: UDP
- name: dns-tcp
port: 53
protocol: TCP
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: kube-dns
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: v1
kind: ConfigMap
metadata:
name: kube-dns
namespace: kube-system
labels:
addonmanager.kubernetes.io/mode: EnsureExists
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: kube-dns
namespace: kube-system
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
spec:
# replicas: not specified here:
# 1. In order to make Addon Manager do not reconcile this replicas parameter.
# 2. Default is 1.
# 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
strategy:
rollingUpdate:
maxSurge: 10%
maxUnavailable: 0
selector:
matchLabels:
k8s-app: kube-dns
template:
metadata:
labels:
k8s-app: kube-dns
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"
volumes:
- name: kube-dns-config
configMap:
name: kube-dns
optional: true
containers:
- name: kubedns
image: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.7
resources:
# TODO: Set memory limits when we've profiled the container for large
# clusters, then set request = limit to keep this container in
# guaranteed class. Currently, this container falls into the
# "burstable" category so the kubelet doesn't backoff from restarting it.
limits:
memory: 170Mi
requests:
cpu: 100m
memory: 70Mi
livenessProbe:
httpGet:
path: /healthcheck/kubedns
port: 10054
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
readinessProbe:
httpGet:
path: /readiness
port: 8081
scheme: HTTP
# we poll on pod startup for the Kubernetes master service and
# only setup the /readiness HTTP server once that's available.
initialDelaySeconds: 3
timeoutSeconds: 5
args:
- --domain=cluster.local.
- --dns-port=10053
- --config-dir=/kube-dns-config
- --v=2
env:
- name: PROMETHEUS_PORT
value: "10055"
ports:
- containerPort: 10053
name: dns-local
protocol: UDP
- containerPort: 10053
name: dns-tcp-local
protocol: TCP
- containerPort: 10055
name: metrics
protocol: TCP
volumeMounts:
- name: kube-dns-config
mountPath: /kube-dns-config
- name: dnsmasq
image: gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.7
livenessProbe:
httpGet:
path: /healthcheck/dnsmasq
port: 10054
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
args:
- -v=2
- -logtostderr
- -configDir=/etc/k8s/dns/dnsmasq-nanny
- -restartDnsmasq=true
- --
- -k
- --cache-size=1000
- --no-negcache
- --log-facility=-
- --server=/cluster.local/127.0.0.1#10053
- --server=/in-addr.arpa/127.0.0.1#10053
- --server=/ip6.arpa/127.0.0.1#10053
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
# see: https://github.com/kubernetes/kubernetes/issues/29055 for details
resources:
requests:
cpu: 150m
memory: 20Mi
volumeMounts:
- name: kube-dns-config
mountPath: /etc/k8s/dns/dnsmasq-nanny
- name: sidecar
image: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.7
livenessProbe:
httpGet:
path: /metrics
port: 10054
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
args:
- --v=2
- --logtostderr
- --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.cluster.local,5,SRV
- --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.cluster.local,5,SRV
ports:
- containerPort: 10054
name: metrics
protocol: TCP
resources:
requests:
memory: 20Mi
cpu: 10m
dnsPolicy: Default # Don't use cluster DNS.
serviceAccountName: kube-dns

52
docs/00-docker.md Normal file
View File

@ -0,0 +1,52 @@
# Build container image
Create dockerfile for the container
```bash
cat <<EOF | tee Dockerfile
FROM ubuntu:22.04
RUN apt update \
&& apt install -y wget systemd kmod systemd-sysv vim less iptables \
&& rm -rf /var/lib/apt/lists/*
RUN systemctl set-default multi-user.target
RUN find /etc/systemd/system /lib/systemd/system \
-path '*.wants/*' -not -name '*systemd*' -exec rm -f {} \;
CMD mkdir /workdir
WORKDIR /workdir
ENTRYPOINT ["/lib/systemd/systemd", "--system"]
EOF
```
Build container image
```bash
docker build -t ubuntu-systemd .
```
Run created container image
```bash
docker run -d \
--name ubuntu-systemd-container \
--privileged \
--security-opt seccomp=unconfined \
--security-opt apparmor=unconfined \
-v /sys/fs/cgroup:/sys/fs/cgroup:rw \
--tmpfs /tmp \
--tmpfs /run \
--tmpfs /run/lock \
ubuntu-systemd
```
And now we need to run bash inside the container
```bash
docker exec -it ubuntu-systemd-container bash
```
Next: [Kubernetes architecture](./00-kubernetes-architecture.md)

View File

@ -42,7 +42,7 @@ mkdir -p busybox-container/rootfs/bin \
&& ./busybox-x86_64 --install . \
&& cd ./../.. \
&& runc spec \
&& sed -i 's/"sh"/"echo","Hello from container runned by runc!","sleep","3600"/' config.json
&& sed -i 's/"sh"/"echo","Hello from container runned by runc!"/' config.json
```
In this step, we downloaded the busybox image, unarchived it, and created the proper files, required by runc to run the container (including container configuration and files that will be accessible from the container). So, let's run our container
@ -190,9 +190,7 @@ docker.io/library/busybox:latest application/vnd.docker.distribution.manifest.li
Now, let's start our container
```bash
ctr run --rm --snapshotter native docker.io/library/busybox:latest busybox-container sh -c 'echo "Hello"'
ctr run --detach --runtime io.containerd.runc.v2 --snapshotter native docker.io/library/busybox:latest busybox-container sh -c 'sleep 3600'
ctr run --detach docker.io/library/busybox:latest busybox-container sh -c 'echo "Hello from container runned by containerd!"'
ctr run --detach --snapshotter native docker.io/library/busybox:latest busybox-container sh -c 'while sleep 1; do echo "Hi"; done'
```
Output:
@ -219,12 +217,19 @@ ctr task ls
Output:
```
TASK PID STATUS
busybox-container 2862580 STOPPED
busybox-container 2862580 RUNNING
```
As we can see our container is in the stopped state (because the command was successfully executed and the container stopped).
Now, let's clean up our workspace and go to the next section.
Stop running command
```bash
kill -9 $(ctr task ls | grep busybox | awk '{print $2}')
```
And remove the created container
```bash
ctr containers rm busybox-container
```

View File

@ -24,22 +24,16 @@ First of all, we need to download kubelet.
```bash
wget -q --show-progress --https-only --timestamping \
https://dl.k8s.io/v1.32.3/bin/linux/amd64/kubelet
tar -xvzf kubernetes-node-linux-amd64.tar.gz
```
After the download process completes, move the kubelet binary to the proper folder
```bash
# chmod +x kubelet \
# && mv kubelet /usr/local/bin/
chmod +x kubelet \
&& mv kubelet /usr/local/bin/
```
Ensure swap is disabled
```bash
chmod +x kubernetes/node/bin/kubelet \
&& mv kubernetes/node/bin/kubelet /usr/local/bin/
```
```bash
ensure swap is disabled
swapoff -a
```

View File

@ -37,7 +37,7 @@ As we can see our nginx container is up and running.
Let's check whether it works as expected.
```bash
curl localhost
wget -O- localhost
```
Output:
@ -178,7 +178,9 @@ crictl pods
Output:
```
POD ID CREATED STATE NAME NAMESPACE ATTEMPT RUNTIME
POD ID CREATED STATE NAME NAMESPACE ATTEMPT RUNTIME
dd37d609e012d About a minute ago NotReady static-nginx-2-b66c13e037b3 default 0 (default)
42c3883717b2d About a minute ago NotReady static-nginx-b66c13e037b3 default 0 (default)
```
We see nothing.
@ -390,7 +392,7 @@ So, let's try to curl the container.
PID=$(crictl pods --label app=static-nginx-2 -q)
CID=$(crictl ps -q --pod $PID)
IP=$(crictl exec $CID ip a | grep 240 | awk '{print $2}' | cut -f1 -d'/')
curl $IP
wget -O- $IP
}
```
@ -496,10 +498,6 @@ Commercial support is available at
<p><em>Thank you for using nginx.</em></p>
</body>
</html>
Connecting to 10.240.1.4 (10.240.1.4:80)
writing to stdout
- 100% |********************************| 615 0:00:00 ETA
written to stdout
```
As we can see we successfully reached our container from busybox.

View File

@ -214,7 +214,7 @@ Output:
## verify
When etcd is up and running, we can check whether we can communicate with it
```
```bash
ETCDCTL_API=3 etcdctl member list \
--endpoints=https://127.0.0.1:2379 \
--cacert=/etc/etcd/ca.pem \

View File

@ -231,47 +231,6 @@ But now, lets view logs using kubectl instead of crictl. In our case it is maybe
kubectl logs hello-world
```
Output:
```
Error from server (Forbidden): Forbidden (user=kubernetes, verb=get, resource=nodes, subresource=proxy) ( pods/log hello-world)
```
As we can see the api server has no permissions to read logs from the node. This message appears because, during authorization, kubelet asks the api server whether the user with the name kubernetes has the proper permissions, but currently it does not. So let's fix this
```bash
{
cat <<EOF> node-auth.yml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: node-proxy-access
rules:
- apiGroups: [""]
resources: ["nodes/proxy"]
verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: node-proxy-access-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: node-proxy-access
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: kubernetes
EOF
kubectl apply -f node-auth.yml
}
```
After our cluster role and role binding are created, we can retry
```bash
kubectl logs hello-world
```
Output:
```
Hello, World!
@ -283,8 +242,6 @@ Hello, World!
As you can see, we can create pods and kubelet will run that pods.
Note: it takes some time to apply created RBAC policies.
Now, we need to clean up our workspace.
```bash
kubectl delete -f pod.yaml

View File

@ -92,47 +92,6 @@ And execute command from our container
kubectl exec busy-box -- wget -O - $(kubectl get pod -o wide | grep nginx | awk '{print $6}' | head -n 1)
```
Output:
```
error: unable to upgrade connection: Forbidden (user=kubernetes, verb=create, resource=nodes, subresource=proxy)
```
This error occurred because the api server has no access to execute commands. We will fix this issue by creating a cluster role and assigning it to the kubernetes user.
```bash
{
cat <<EOF | tee rbac-create.yml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kubernetes-user-clusterrole
rules:
- apiGroups: [""]
resources: ["nodes/proxy"]
verbs: ["create"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kubernetes-user-clusterrolebinding
subjects:
- kind: User
name: kubernetes
apiGroup: rbac.authorization.k8s.io
roleRef:
kind: ClusterRole
name: kubernetes-user-clusterrole
apiGroup: rbac.authorization.k8s.io
EOF
kubectl apply -f rbac-create.yml
}
```
Now, we can execute command
```bash
kubectl exec busy-box -- wget -O - $(kubectl get pod -o wide | grep nginx | awk '{print $6}' | head -n 1)
```
Output:
```
Hello from pod: nginx-deployment-68b9c94586-qkwjc
@ -347,5 +306,3 @@ written to stdout
```
If you try to repeat the command once again you will see that requests are handled by different pods.
Next: [DNS in Kubernetes](./10-dns.md)

View File

@ -1,46 +0,0 @@
# DNS in Kubernetes
As we saw in previous section, kubernetes has special component to solve service discovery issues. But we solved it only partially.
In this section we will work through the next part of service discovery.
If you remember, in previous section we accessed service by using its IP address. But it solves the issue only partially, as we still need to know the service IP address. To solve second part of it - we will configure DNS server in kubernetes.
> In Kubernetes, DNS (Domain Name System) is a crucial component that enables service discovery and communication between various resources within a cluster. DNS allows you to refer to services, pods, and other Kubernetes objects by their domain names instead of IP addresses, making it easier to manage and communicate between them.
Before we configure it, we can check whether we can access our service (created in the previous section) by its name.
```bash
kubectl exec busy-box -- wget -O - nginx-service.default.svc.cluster.local.
```
And nothing happens. The reason for this behaviour is that the pod can't resolve the IP address of the requested domain name, as no DNS server is configured in our cluster.
Also, it is worth mentioning that kubernetes automatically configures the DNS settings in each pod to use the "special" DNS server configured for our cluster; this DNS server address was specified when setting up kubelet
```
...
clusterDNS:
- "10.32.0.10"
...
```
We will configure the DNS server using coredns, and will install it into our kubernetes cluster
```bash
kubectl apply -f https://raw.githubusercontent.com/ruslansavchuk/kubernetes-the-hard-way/master/manifests/coredns.yml -n kube-system
```
After our DNS server is up and running, we can try to repeat the call once again
```bash
kubectl exec busy-box -- wget -O - nginx-service.default.svc.cluster.local.
```
Output:
```
Hello from pod: nginx-deployment-68b9c94586-zh9vn
Connecting to nginx-service (10.32.0.230:80)
writing to stdout
- 100% |********************************| 50 0:00:00 ETA
written to stdout
```
As you can see everything works as expected.

View File

@ -1,165 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: coredns
labels:
k8s-app: coredns
data:
Corefile: |-
.:53 {
errors
health {
lameduck 5s
}
ready
kubernetes cluster.local in-addr.arpa ip6.arpa {
pods insecure
fallthrough in-addr.arpa ip6.arpa
ttl 30
}
prometheus 0.0.0.0:9153
forward . /etc/resolv.conf
cache 30
loop
reload
loadbalance
}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: coredns
labels:
k8s-app: coredns
rules:
- apiGroups:
- ""
resources:
- endpoints
- services
- pods
- namespaces
verbs:
- list
- watch
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: coredns
labels:
k8s-app: coredns
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: coredns
subjects:
- kind: ServiceAccount
name: default
namespace: kube-system
---
apiVersion: v1
kind: Service
metadata:
name: coredns
labels:
k8s-app: coredns
spec:
selector:
k8s-app: coredns
ports:
- { "name": "udp-53", "port": 53, "protocol": "UDP" }
- { "name": "tcp-53", "port": 53, "protocol": "TCP" }
type: ClusterIP
clusterIP: 10.32.0.10
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: coredns
labels:
k8s-app: coredns
spec:
replicas: 1
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
maxSurge: 25%
selector:
matchLabels:
k8s-app: coredns
template:
metadata:
labels:
k8s-app: coredns
annotations:
checksum/config: 158d059c22a85c971e58da7eb18cd7fdf4ddd759f217f75f61d7d1a82e1167e6
scheduler.alpha.kubernetes.io/critical-pod: ""
scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
spec:
terminationGracePeriodSeconds: 30
serviceAccountName: default
dnsPolicy: Default
containers:
- name: "coredns"
image: "coredns/coredns:1.10.1"
imagePullPolicy: IfNotPresent
args: ["-conf", "/etc/coredns/Corefile"]
volumeMounts:
- name: config-volume
mountPath: /etc/coredns
resources:
limits:
cpu: 100m
memory: 128Mi
requests:
cpu: 100m
memory: 128Mi
ports:
- { "containerPort": 53, "name": "udp-53", "protocol": "UDP" }
- { "containerPort": 53, "name": "tcp-53", "protocol": "TCP" }
- { "containerPort": 9153, "name": "tcp-9153", "protocol": "TCP" }
livenessProbe:
httpGet:
path: /health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
periodSeconds: 10
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
readinessProbe:
httpGet:
path: /ready
port: 8181
scheme: HTTP
initialDelaySeconds: 30
periodSeconds: 10
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
securityContext:
capabilities:
add:
- NET_BIND_SERVICE
volumes:
- name: config-volume
configMap:
name: coredns
items:
- key: Corefile
path: Corefile

View File

@ -1,358 +0,0 @@
#!/bin/bash
stage=""
# Parse command line arguments
while [[ "$#" -gt 0 ]]; do
case $1 in
--stage)
stage="$2"
shift # Remove --last-stage
shift # Remove the value
;;
*)
echo "Unknown parameter: $1"
exit 1
;;
esac
done
# init container runtime
if [ "$stage" = "configure-runtime" ]; then
echo '====================== download runc ========================='
wget -q --show-progress --https-only --timestamping \
https://github.com/opencontainers/runc/releases/download/v1.2.6/runc.amd64
echo '====================== make runc executable ========================='
mv runc.amd64 runc \
&& chmod +x runc \
&& mv runc /usr/local/bin/
echo '====================== download containerd ========================='
wget https://github.com/containerd/containerd/releases/download/v2.0.4/containerd-2.0.4-linux-amd64.tar.gz
echo '====================== make containerd executable ========================='
mkdir containerd \
&& tar -xvf containerd-2.0.4-linux-amd64.tar.gz -C containerd \
&& mv containerd/bin/* /bin/
echo '====================== int containerd service ========================='
mkdir -p /etc/containerd/
cat << EOF | tee /etc/containerd/config.toml
[debug]
level = "debug"
[plugins]
[plugins.'io.containerd.cri.v1.images']
snapshotter = "native"
[plugins."io.containerd.cri.v1.runtime"]
[plugins."io.containerd.cri.v1.runtime".containerd]
default_runtime_name = "runc"
[plugins."io.containerd.cri.v1.runtime".containerd.runtimes]
[plugins."io.containerd.cri.v1.runtime".containerd.runtimes.runc]
runtime_type = "io.containerd.runc.v2"
snapshotter = "native"
[plugins."io.containerd.cri.v1.runtime".containerd.runtimes.runc.options]
BinaryName = "/usr/local/bin/runc"
EOF
cat <<EOF | tee /etc/systemd/system/containerd.service
[Unit]
Description=containerd container runtime
Documentation=https://containerd.io
After=network.target
[Service]
ExecStart=/bin/containerd
Restart=always
RestartSec=5
Delegate=yes
KillMode=process
OOMScoreAdjust=-999
LimitNOFILE=1048576
LimitNPROC=infinity
LimitCORE=infinity
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload \
&& systemctl enable containerd \
&& systemctl start containerd
systemctl status containerd
exit 0
elif [ "$stage" = "configure-kubelet" ]; then
echo '====================== download kubelet ========================='
wget -q --show-progress --https-only --timestamping \
https://dl.k8s.io/v1.32.3/kubernetes-node-linux-amd64.tar.gz
tar -xvzf kubernetes-node-linux-amd64.tar.gz
echo '====================== make kubelet executable ========================='
chmod +x kubernetes/node/bin/kubelet \
&& mv kubernetes/node/bin/kubelet /usr/local/bin/
echo '====================== disable swap ========================='
swapoff -a
echo '====================== int kubelet service ========================='
cat <<EOF | tee /etc/systemd/system/kubelet.service
[Unit]
Description=kubelet: The Kubernetes Node Agent
Documentation=https://kubernetes.io/docs/home/
Wants=network-online.target
After=network-online.target
[Service]
ExecStart=/usr/local/bin/kubelet \\
--container-runtime-endpoint=unix:///var/run/containerd/containerd.sock \\
--file-check-frequency=10s \\
--pod-manifest-path='/etc/kubernetes/manifests/' \\
--v=10
Restart=always
StartLimitInterval=0
RestartSec=10
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload \
&& systemctl enable kubelet \
&& systemctl start kubelet
systemctl status kubelet
echo '====================== download crictl ========================='
wget -q --show-progress --https-only --timestamping \
https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.32.0/crictl-v1.32.0-linux-amd64.tar.gz
tar -xvf crictl-v1.32.0-linux-amd64.tar.gz \
&& chmod +x crictl \
&& mv crictl /usr/local/bin/
echo '====================== configure crictl ========================='
tar -xvf crictl-v1.32.0-linux-amd64.tar.gz \
&& chmod +x crictl \
&& mv crictl /usr/local/bin/
cat <<EOF | tee /etc/crictl.yaml
runtime-endpoint: unix:///run/containerd/containerd.sock
image-endpoint: unix:///run/containerd/containerd.sock
timeout: 10
debug: false
EOF
exit 0
# configure networking
elif [ "$stage" = "configure-networking" ]; then
echo '====================== download cni-plugins ========================='
wget -q --show-progress --https-only --timestamping \
https://github.com/containernetworking/plugins/releases/download/v1.6.2/cni-plugins-linux-amd64-v1.6.2.tgz
echo '====================== configure cni-plugins ========================='
mkdir -p \
/etc/cni/net.d \
/opt/cni/bin
tar -xvf cni-plugins-linux-amd64-v1.6.2.tgz -C /opt/cni/bin/
cat <<EOF | tee /etc/cni/net.d/10-bridge.conf
{
"cniVersion": "0.4.0",
"name": "bridge",
"type": "bridge",
"bridge": "cnio0",
"isGateway": true,
"ipMasq": true,
"ipam": {
"type": "host-local",
"ranges": [
[{"subnet": "10.240.1.0/24"}]
],
"routes": [{"dst": "0.0.0.0/0"}]
}
}
EOF
cat <<EOF | tee /etc/cni/net.d/99-loopback.conf
{
"cniVersion": "0.4.0",
"name": "lo",
"type": "loopback"
}
EOF
echo '====================== reconfigure cni-plugins ========================='
cat <<EOF | tee /var/lib/kubelet/kubelet-config.yaml
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
anonymous:
enabled: true
webhook:
enabled: false
authorization:
mode: AlwaysAllow
networkPlugin: "cni"
cniConfDir: "/etc/cni/net.d"
cniBinDir: "/opt/cni/bin"
EOF
cat <<EOF | tee /etc/systemd/system/kubelet.service
[Unit]
Description=kubelet: The Kubernetes Node Agent
Documentation=https://kubernetes.io/docs/home/
Wants=network-online.target
After=network-online.target
[Service]
ExecStart=/usr/local/bin/kubelet \\
--container-runtime-endpoint=unix:///var/run/containerd/containerd.sock \\
--file-check-frequency=10s \\
--config=/var/lib/kubelet/kubelet-config.yaml \\
--pod-manifest-path='/etc/kubernetes/manifests/' \\
--v=10
Restart=always
StartLimitInterval=0
RestartSec=10
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload \
&& systemctl restart kubelet
systemctl status kubelet
exit 0
# configure etcd
elif [ "$stage" = "configure-etcd" ]; then
echo '====================== download cert tools ========================='
wget -q --show-progress --https-only --timestamping \
https://github.com/cloudflare/cfssl/releases/download/v1.4.1/cfssl_1.4.1_linux_amd64 \
https://github.com/cloudflare/cfssl/releases/download/v1.4.1/cfssljson_1.4.1_linux_amd64
mv cfssl_1.4.1_linux_amd64 cfssl \
&& mv cfssljson_1.4.1_linux_amd64 cfssljson \
&& chmod +x cfssl cfssljson \
&& mv cfssl cfssljson /usr/local/bin/
echo '====================== generate etcd certs ========================='
cat <<EOF | tee ca-config.json
{
"signing": {
"default": {
"expiry": "8760h"
},
"profiles": {
"kubernetes": {
"usages": ["signing", "key encipherment", "server auth", "client auth"],
"expiry": "8760h"
}
}
}
}
EOF
cat <<EOF | tee ca-csr.json
{
"CN": "Kubernetes",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "US",
"L": "Portland",
"O": "Kubernetes",
"OU": "CA",
"ST": "Oregon"
}
]
}
EOF
cfssl gencert -initca ca-csr.json | cfssljson -bare ca
HOST_NAME=$(hostname -a)
KUBERNETES_HOSTNAMES=kubernetes,kubernetes.default,kubernetes.default.svc,kubernetes.default.svc.cluster,kubernetes.svc.cluster.local
cat <<EOF | tee kubernetes-csr.json
{
"CN": "kubernetes",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "US",
"L": "Portland",
"O": "Kubernetes",
"OU": "Kubernetes The Hard Way",
"ST": "Oregon"
}
]
}
EOF
cfssl gencert \
-ca=ca.pem \
-ca-key=ca-key.pem \
-config=ca-config.json \
-hostname=worker,127.0.0.1,${KUBERNETES_HOSTNAMES},10.32.0.1 \
-profile=kubernetes \
kubernetes-csr.json | cfssljson -bare kubernetes
echo '====================== download etcd ========================='
wget -q --show-progress --https-only --timestamping \
"https://github.com/etcd-io/etcd/releases/download/v3.4.15/etcd-v3.4.15-linux-amd64.tar.gz"
echo '====================== configure etcd ========================='
tar -xvf etcd-v3.4.15-linux-amd64.tar.gz \
&& mv etcd-v3.4.15-linux-amd64/etcd* /usr/local/bin/
mkdir -p /etc/etcd /var/lib/etcd \
&& chmod 700 /var/lib/etcd \
&& cp ca.pem kubernetes.pem kubernetes-key.pem /etc/etcd/
cat <<EOF | tee /etc/systemd/system/etcd.service
[Unit]
Description=etcd
Documentation=https://github.com/coreos
[Service]
Type=notify
ExecStart=/usr/local/bin/etcd \\
--client-cert-auth \\
--name etcd \\
--cert-file=/etc/etcd/kubernetes.pem \\
--key-file=/etc/etcd/kubernetes-key.pem \\
--trusted-ca-file=/etc/etcd/ca.pem \\
--listen-client-urls https://127.0.0.1:2379 \\
--advertise-client-urls https://127.0.0.1:2379 \\
--data-dir=/var/lib/etcd
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload \
&& systemctl enable etcd \
&& systemctl start etcd
systemctl status etcd
exit 0
# configure api server
elif [ "$stage" = "configure-etcd" ]; then
fi