make kube-dns work again

Author: Kelsey Hightower
Date: 2017-03-24 08:31:17 -07:00
Parent: c72849f7e3
Commit: 6827ce575e
6 changed files with 75 additions and 54 deletions


@@ -15,31 +15,34 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: kube-dns-v20
  name: kube-dns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    version: v20
    kubernetes.io/cluster-service: "true"
spec:
  replicas: 2
  # replicas: not specified here:
  # 1. In order to make Addon Manager do not reconcile this replicas parameter.
  # 2. Default is 1.
  # 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
  strategy:
    rollingUpdate:
      maxSurge: 10%
      maxUnavailable: 0
  selector:
    matchLabels:
      k8s-app: kube-dns
      version: v20
  template:
    metadata:
      labels:
        k8s-app: kube-dns
        version: v20
        kubernetes.io/cluster-service: "true"
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
        scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
    spec:
      containers:
      - name: kubedns
        image: gcr.io/google_containers/kubedns-amd64:1.8
        image: gcr.io/google_containers/kubedns-amd64:1.9
        resources:
          # TODO: Set memory limits when we've profiled the container for large
          # clusters, then set request = limit to keep this container in
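
The scheduler.alpha.kubernetes.io/tolerations annotation above is the annotation-based form of the CriticalAddonsOnly toleration. On clusters where tolerations are available as a first-class pod-spec field, the same intent could be expressed under the pod template's spec roughly as follows (a sketch, not part of this commit):

      tolerations:
      - key: "CriticalAddonsOnly"
        operator: "Exists"
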
@@ -69,9 +72,15 @@ spec:
          initialDelaySeconds: 3
          timeoutSeconds: 5
        args:
        # command = "/kube-dns"
        - --domain=cluster.local
        - --domain=cluster.local.
        - --dns-port=10053
        - --config-map=kube-dns
        # This should be set to v=2 only after the new image (cut from 1.5) has
        # been released, otherwise we will flood the logs.
        - --v=0
        env:
        - name: PROMETHEUS_PORT
          value: "10055"
        ports:
        - containerPort: 10053
          name: dns-local
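
The --config-map=kube-dns flag points kubedns at a ConfigMap of that name in the kube-system namespace for optional DNS configuration; kube-dns still starts if the ConfigMap is absent. A minimal sketch of such a ConfigMap (not part of this commit; the stubDomains and upstreamNameservers keys are only honored by sufficiently recent kube-dns releases, and the domain and addresses below are hypothetical):

apiVersion: v1
kind: ConfigMap
metadata:
  name: kube-dns
  namespace: kube-system
data:
  stubDomains: |
    {"internal.example.com": ["10.0.0.53"]}
  upstreamNameservers: |
    ["8.8.8.8", "8.8.4.4"]
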
@@ -79,6 +88,9 @@ spec:
        - containerPort: 10053
          name: dns-tcp-local
          protocol: TCP
        - containerPort: 10055
          name: metrics
          protocol: TCP
      - name: dnsmasq
        image: gcr.io/google_containers/kube-dnsmasq-amd64:1.4
        livenessProbe:
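
This Deployment only runs the DNS pods; cluster DNS also relies on a kube-dns Service that fronts port 53 at the cluster IP the kubelets advertise via --cluster-dns. That Service is not part of this hunk; a minimal sketch, assuming a hypothetical 10.32.0.10 DNS cluster IP:

apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: 10.32.0.10  # hypothetical; must match the kubelet --cluster-dns setting
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
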
@@ -102,6 +114,32 @@ spec:
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        # see: https://github.com/kubernetes/kubernetes/issues/29055 for details
        resources:
          requests:
            cpu: 150m
            memory: 10Mi
      - name: dnsmasq-metrics
        image: gcr.io/google_containers/dnsmasq-metrics-amd64:1.0
        livenessProbe:
          httpGet:
            path: /metrics
            port: 10054
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        args:
        - --v=2
        - --logtostderr
        ports:
        - containerPort: 10054
          name: metrics
          protocol: TCP
        resources:
          requests:
            memory: 10Mi
      - name: healthz
        image: gcr.io/google_containers/exechealthz-amd64:1.2
        resources:
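
The kubedns (port 10055) and dnsmasq-metrics (port 10054) containers both expose Prometheus metrics on /metrics. If a Prometheus server is configured to honor the conventional prometheus.io/* annotations, scraping could be enabled with pod-template annotations along these lines (a sketch, not part of this commit):

      annotations:
        prometheus.io/scrape: "true"
        prometheus.io/port: "10054"
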
@@ -109,6 +147,10 @@ spec:
            memory: 50Mi
          requests:
            cpu: 10m
            # Note that this container shouldn't really need 50Mi of memory. The
            # limits are set higher than expected pending investigation on #29688.
            # The extra memory was stolen from the kubedns container to keep the
            # net memory requested by the pod constant.
            memory: 50Mi
        args:
        - --cmd=nslookup kubernetes.default.svc.cluster.local 127.0.0.1 >/dev/null
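
A quick way to check the updated add-on end to end is a throwaway pod that resolves a service name through cluster DNS, mirroring the nslookup the healthz container runs above. A minimal sketch (the pod name and image are arbitrary, not part of this commit):

apiVersion: v1
kind: Pod
metadata:
  name: busybox
spec:
  containers:
  - name: busybox
    image: busybox
    command: ["sleep", "3600"]
# then, for example:
#   kubectl exec busybox -- nslookup kubernetes.default.svc.cluster.local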