Default YAML for deploying NodeLocal DNSCache (node-local-dns) on Kubernetes. Validate it in a test environment first and roll it out to production only after confirming it works as expected. The default YAML binds two internal DNS addresses, 169.254.20.10 and 10.96.0.10, which may be related to the well-known Kubernetes DNS 5-second latency problem.
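Before applying the manifest below, it is worth confirming that the hard-coded 10.96.0.10 actually matches the kube-dns Service ClusterIP in your cluster; if it does not, adjust the bind lines in the Corefile and the -localip argument accordingly. A minimal pre-flight sketch:

# Print the ClusterIP of the kube-dns Service; it should be 10.96.0.10 for the YAML below to apply unchanged.
kubectl -n kube-system get svc kube-dns -o jsonpath='{.spec.clusterIP}'; echo

# 169.254.20.10 is a link-local address used only on each node and must not already be in use there.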
apiVersion: v1
kind: ServiceAccount
metadata:
  name: node-local-dns
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: v1
kind: Service
metadata:
  name: kube-dns-upstream
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "KubeDNSUpstream"
spec:
  ports:
  - name: dns
    port: 53
    protocol: UDP
    targetPort: 53
  - name: dns-tcp
    port: 53
    protocol: TCP
    targetPort: 53
  selector:
    k8s-app: kube-dns
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: node-local-dns
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
data:
  Corefile: |
    cluster.local:53 {
        errors
        cache {
            success 9984 30
            denial 9984 5
        }
        reload
        loop
        bind 169.254.20.10 10.96.0.10
        forward . __PILLAR__CLUSTER__DNS__ {
            force_tcp
        }
        prometheus :9253
        health 169.254.20.10:8080
    }
    in-addr.arpa:53 {
        errors
        cache 30
        reload
        loop
        bind 169.254.20.10 10.96.0.10
        forward . __PILLAR__CLUSTER__DNS__ {
            force_tcp
        }
        prometheus :9253
    }
    ip6.arpa:53 {
        errors
        cache 30
        reload
        loop
        bind 169.254.20.10 10.96.0.10
        forward . __PILLAR__CLUSTER__DNS__ {
            force_tcp
        }
        prometheus :9253
    }
    .:53 {
        errors
        cache 30
        reload
        loop
        bind 169.254.20.10 10.96.0.10
        forward . 10.98.53.229
        prometheus :9253
    }
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: node-local-dns
  namespace: kube-system
  labels:
    k8s-app: node-local-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  updateStrategy:
    rollingUpdate:
      maxUnavailable: 10%
  selector:
    matchLabels:
      k8s-app: node-local-dns
  template:
    metadata:
      labels:
        k8s-app: node-local-dns
      annotations:
        prometheus.io/port: "9253"
        prometheus.io/scrape: "true"
    spec:
      priorityClassName: system-node-critical
      serviceAccountName: node-local-dns
      hostNetwork: true
      dnsPolicy: Default  # Don't use cluster DNS.
      tolerations:
      - key: "CriticalAddonsOnly"
        operator: "Exists"
      - effect: "NoExecute"
        operator: "Exists"
      - effect: "NoSchedule"
        operator: "Exists"
      containers:
      - name: node-cache
        image: k8s.gcr.io/dns/k8s-dns-node-cache:1.17.0
        resources:
          requests:
            cpu: 100m
            memory: 50Mi
        args: [ "-localip", "169.254.20.10,10.96.0.10", "-conf", "/etc/Corefile", "-upstreamsvc", "kube-dns-upstream" ]
        securityContext:
          privileged: true
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: 9253
          name: metrics
          protocol: TCP
        livenessProbe:
          httpGet:
            host: 169.254.20.10
            path: /health
            port: 8080
          initialDelaySeconds: 60
          timeoutSeconds: 5
        volumeMounts:
        - mountPath: /run/xtables.lock
          name: xtables-lock
          readOnly: false
        - name: config-volume
          mountPath: /etc/coredns
        - name: kube-dns-config
          mountPath: /etc/kube-dns
      volumes:
      - name: xtables-lock
        hostPath:
          path: /run/xtables.lock
          type: FileOrCreate
      - name: kube-dns-config
        configMap:
          name: kube-dns
          optional: true
      - name: config-volume
        configMap:
          name: node-local-dns
          items:
          - key: Corefile
            path: Corefile.base
---
# A headless service is a service with a service IP but instead of load-balancing it will return the IPs of our associated Pods.
# We use this to expose metrics to Prometheus.
apiVersion: v1
kind: Service
metadata:
  annotations:
    prometheus.io/port: "9253"
    prometheus.io/scrape: "true"
  labels:
    k8s-app: node-local-dns
  name: node-local-dns
  namespace: kube-system
spec:
  clusterIP: None
  ports:
  - name: metrics
    port: 9253
    targetPort: 9253
  selector:
    k8s-app: node-local-dns
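A minimal sketch for applying the manifest and watching the rollout; nodelocaldns.yaml is an assumed file name for the YAML above:

# Apply the manifest (file name is an assumption).
kubectl apply -f nodelocaldns.yaml

# Wait until the DaemonSet is running on every node, then list the pods.
kubectl -n kube-system rollout status daemonset/node-local-dns
kubectl -n kube-system get pods -l k8s-app=node-local-dns -o wide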
After deployment, check on each node that iptables rules for 169.254.20.10 and 10.96.0.10 are present:
iptables -nvL | grep ":53"
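Beyond the iptables rules, a quick functional check on any node confirms that the local cache answers queries and exposes metrics. A minimal sketch, assuming dig and curl are available on the node:

# Query the node-local cache directly for an in-cluster name.
dig @169.254.20.10 kubernetes.default.svc.cluster.local +short

# The same query against the kube-dns ClusterIP should now also be answered by the local cache.
dig @10.96.0.10 kubernetes.default.svc.cluster.local +short

# Cache statistics are exported on the metrics port configured in the Corefile.
curl -s http://169.254.20.10:9253/metrics | grep coredns_cache_hits_total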