Merge pull request #1230 from Luis-TT/fix-kube-proxy-dashboard

Paweł Krupa 2021-08-04 09:55:09 +02:00 committed by GitHub
commit e931a417fc
3 changed files with 68 additions and 0 deletions


@@ -70,6 +70,7 @@ If you are migrating from `release-0.7` branch or earlier please read [what chan
- [Authentication problem](#authentication-problem)
- [Authorization problem](#authorization-problem)
- [kube-state-metrics resource usage](#kube-state-metrics-resource-usage)
- [Error retrieving kube-proxy metrics](#error-retrieving-kube-proxy-metrics)
- [Contributing](#contributing)
- [License](#license)
@@ -770,6 +771,13 @@ config. They default to:
}
```
### Error retrieving kube-proxy metrics
By default, kubeadm configures kube-proxy to serve metrics on 127.0.0.1, which means Prometheus cannot scrape them. The bind address has to be changed to 0.0.0.0 in one of the following two places:
1. Before cluster initialization: the config file passed to `kubeadm init` should contain a `KubeProxyConfiguration` manifest with the field `metricsBindAddress` set to `0.0.0.0:10249` (see the sketch after this list).
2. If the cluster is already up and running: edit the `kube-proxy` ConfigMap in the `kube-system` namespace, set the `metricsBindAddress` field accordingly, and then restart the kube-proxy DaemonSet with
`kubectl -n kube-system rollout restart daemonset kube-proxy`
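
For the first option, a minimal sketch of such a kubeadm config file, assuming the `v1beta2` kubeadm API (the file name and the `ClusterConfiguration` stanza are illustrative; only the `KubeProxyConfiguration` part matters for this change):

```yaml
# kubeadm-config.yaml -- pass to: kubeadm init --config kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
# Bind the metrics endpoint to all interfaces instead of 127.0.0.1.
metricsBindAddress: 0.0.0.0:10249
```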
## Contributing
All `.yaml` files in the `/manifests` folder are generated via


@@ -0,0 +1,20 @@
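// Example: enable the kube-proxy PodMonitor via the kubernetesControlPlane component.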
local kp = (import 'kube-prometheus/main.libsonnet') + {
  values+:: {
    common+: {
      namespace: 'monitoring',
    },
    kubernetesControlPlane+: {
      kubeProxy: true,
    },
  },
};
{ ['00namespace-' + name]: kp.kubePrometheus[name] for name in std.objectFields(kp.kubePrometheus) } +
{ ['0prometheus-operator-' + name]: kp.prometheusOperator[name] for name in std.objectFields(kp.prometheusOperator) } +
{ ['node-exporter-' + name]: kp.nodeExporter[name] for name in std.objectFields(kp.nodeExporter) } +
{ ['kube-state-metrics-' + name]: kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics) } +
{ ['alertmanager-' + name]: kp.alertmanager[name] for name in std.objectFields(kp.alertmanager) } +
{ ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) } +
{ ['grafana-' + name]: kp.grafana[name] for name in std.objectFields(kp.grafana) } +
{ ['kubernetes-' + name]: kp.kubernetesControlPlane[name] for name in std.objectFields(kp.kubernetesControlPlane) }


@@ -22,6 +22,7 @@ local defaults = {
      hostNetworkInterfaceSelector: 'device!~"veth.+"',
    },
  },
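  // Disabled by default; set to true to generate a PodMonitor for kube-proxy.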
  kubeProxy: false,
};
function(params) {
@@ -251,6 +252,45 @@ function(params) {
    },
  },
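  // PodMonitor for kube-proxy (scrapes metrics port 10249); only rendered when the kubeProxy flag is enabled.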
  [if (defaults + params).kubeProxy then 'podMonitorKubeProxy']: {
    apiVersion: 'monitoring.coreos.com/v1',
    kind: 'PodMonitor',
    metadata: {
      labels: {
        'k8s-app': 'kube-proxy',
      },
      name: 'kube-proxy',
      namespace: k8s._config.namespace,
    },
    spec: {
      jobLabel: 'k8s-app',
      namespaceSelector: {
        matchNames: [
          'kube-system',
        ],
      },
      selector: {
        matchLabels: {
          'k8s-app': 'kube-proxy',
        },
      },
      podMetricsEndpoints: [{
        honorLabels: true,
        targetPort: 10249,
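        // Set the instance label to the node's name so each kube-proxy pod is identified by its node.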
        relabelings: [
          {
            action: 'replace',
            regex: '(.*)',
            replacement: '$1',
            sourceLabels: ['__meta_kubernetes_pod_node_name'],
            targetLabel: 'instance',
          },
        ],
      }],
    },
  },
  serviceMonitorCoreDNS: {
    apiVersion: 'monitoring.coreos.com/v1',
    kind: 'ServiceMonitor',