# kube-dns addon: ServiceAccount, ConfigMap, Service and Deployment for
# cluster DNS (kubedns + dnsmasq nanny + sidecar health prober).
apiVersion: v1
kind: ServiceAccount
metadata:
  name: kube-dns
  namespace: kube-system
---
# Placeholder config; EnsureExists lets operators edit it without the
# addon manager reverting their changes.
apiVersion: v1
kind: ConfigMap
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
---
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "KubeDNS"
spec:
  # Fixed cluster DNS IP; must match the --cluster-dns kubelet flag.
  # NOTE(review): assumes the service CIDR contains 10.32.0.10 — confirm
  # against the cluster's --service-cluster-ip-range.
  clusterIP: 10.32.0.10
  ports:
    - name: dns
      port: 53
      protocol: UDP
      targetPort: 53
    - name: dns-tcp
      port: 53
      protocol: TCP
      targetPort: 53
  selector:
    k8s-app: kube-dns
  sessionAffinity: None
  type: ClusterIP
---
# apps/v1 replaces extensions/v1beta1, which was removed in Kubernetes
# 1.16; spec.selector (required by apps/v1) was already present.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
spec:
  replicas: 2
  selector:
    matchLabels:
      k8s-app: kube-dns
  strategy:
    type: RollingUpdate
    rollingUpdate:
      # Surge-only rollout: never take an existing DNS replica down
      # before its replacement is ready.
      maxSurge: 10%
      maxUnavailable: 0
  template:
    metadata:
      annotations:
        # Deprecated critical-pod marker, kept for pre-1.11 schedulers;
        # priorityClassName below is the supported mechanism.
        scheduler.alpha.kubernetes.io/critical-pod: ""
      labels:
        k8s-app: kube-dns
    spec:
      # Marks DNS as cluster-critical so it is scheduled/preempted ahead
      # of ordinary workloads (replaces the critical-pod annotation).
      priorityClassName: system-cluster-critical
      containers:
        # Main DNS server: answers cluster.local queries from the API
        # server's service/endpoint state on localhost:10053.
        - name: kubedns
          image: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.4
          env:
            - name: PROMETHEUS_PORT
              value: "10055"
          args:
            - --domain=cluster.local.
            - --dns-port=10053
            - --config-dir=/kube-dns-config
            - --v=2
          livenessProbe:
            failureThreshold: 5
            httpGet:
              path: /healthcheck/kubedns
              port: 10054
              scheme: HTTP
            initialDelaySeconds: 60
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 5
          readinessProbe:
            failureThreshold: 3
            httpGet:
              path: /readiness
              port: 8081
              scheme: HTTP
            initialDelaySeconds: 3
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 5
          ports:
            - name: dns-local
              containerPort: 10053
              protocol: UDP
            - name: dns-tcp-local
              containerPort: 10053
              protocol: TCP
            - name: metrics
              containerPort: 10055
              protocol: TCP
          resources:
            limits:
              memory: 170Mi
            requests:
              cpu: 100m
              memory: 70Mi
          volumeMounts:
            - name: kube-dns-config
              mountPath: /kube-dns-config
        # dnsmasq front-end: caches and serves port 53, forwarding
        # cluster zones to kubedns on 127.0.0.1:10053.
        - name: dnsmasq
          image: gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.4
          args:
            - -v=2
            - -logtostderr
            - -configDir=/etc/k8s/dns/dnsmasq-nanny
            - -restartDnsmasq=true
            # Everything after "--" is passed through to dnsmasq itself.
            - --
            - -k
            - --cache-size=1000
            - --log-facility=-
            - --server=/cluster.local/127.0.0.1#10053
            - --server=/in-addr.arpa/127.0.0.1#10053
            - --server=/ip6.arpa/127.0.0.1#10053
          livenessProbe:
            failureThreshold: 5
            httpGet:
              path: /healthcheck/dnsmasq
              port: 10054
              scheme: HTTP
            initialDelaySeconds: 60
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 5
          ports:
            - name: dns
              containerPort: 53
              protocol: UDP
            - name: dns-tcp
              containerPort: 53
              protocol: TCP
          resources:
            requests:
              cpu: 150m
              memory: 20Mi
          volumeMounts:
            - name: kube-dns-config
              mountPath: /etc/k8s/dns/dnsmasq-nanny
        # Sidecar: probes kubedns and dnsmasq and exports the health
        # endpoints (port 10054) used by the liveness probes above.
        - name: sidecar
          image: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.4
          args:
            - --v=2
            - --logtostderr
            - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.cluster.local,5,A
            - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.cluster.local,5,A
          livenessProbe:
            failureThreshold: 5
            httpGet:
              path: /metrics
              port: 10054
              scheme: HTTP
            initialDelaySeconds: 60
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 5
          ports:
            - name: metrics
              containerPort: 10054
              protocol: TCP
          resources:
            requests:
              cpu: 10m
              memory: 20Mi
      # Default (not ClusterFirst): the DNS pods must resolve via the
      # node's resolver, not via themselves.
      dnsPolicy: Default
      restartPolicy: Always
      serviceAccountName: kube-dns
      terminationGracePeriodSeconds: 30
      tolerations:
        - key: CriticalAddonsOnly
          operator: Exists
      volumes:
        - name: kube-dns-config
          configMap:
            name: kube-dns
            # 420 decimal == 0644.
            defaultMode: 420
            # optional: the ConfigMap may legitimately be absent/empty.
            optional: true