Merge pull request #778 from tpalfalvi/blackbox-exporter

install a blackbox-exporter instance
Frederic Branczyk 2020-12-31 12:15:10 +01:00 committed by GitHub
commit b0e2449a8b
15 changed files with 591 additions and 0 deletions

@@ -53,6 +53,7 @@ This stack is meant for cluster monitoring, so it is pre-configured to collect m
- [Stripping container resource limits](#stripping-container-resource-limits)
- [Customizing Prometheus alerting/recording rules and Grafana dashboards](#customizing-prometheus-alertingrecording-rules-and-grafana-dashboards)
- [Exposing Prometheus/Alertmanager/Grafana via Ingress](#exposing-prometheusalertmanagergrafana-via-ingress)
- [Setting up a blackbox exporter](#setting-up-a-blackbox-exporter)
- [Minikube Example](#minikube-example)
- [Troubleshooting](#troubleshooting)
- [Error retrieving kubelet metrics](#error-retrieving-kubelet-metrics)
@@ -223,6 +224,7 @@ local kp =
// serviceMonitor is separated so that it can be created after the CRDs are ready
{ 'prometheus-operator-serviceMonitor': kp.prometheusOperator.serviceMonitor } +
{ ['node-exporter-' + name]: kp.nodeExporter[name] for name in std.objectFields(kp.nodeExporter) } +
{ ['blackbox-exporter-' + name]: kp.blackboxExporter[name] for name in std.objectFields(kp.blackboxExporter) } +
{ ['kube-state-metrics-' + name]: kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics) } +
{ ['alertmanager-' + name]: kp.alertmanager[name] for name in std.objectFields(kp.alertmanager) } +
{ ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) } +
@@ -729,6 +731,36 @@ See [developing Prometheus rules and Grafana dashboards](docs/developing-prometh
See [exposing Prometheus/Alertmanager/Grafana](docs/exposing-prometheus-alertmanager-grafana-ingress.md) guide.
### Setting up a blackbox exporter
```jsonnet
local kp = (import 'kube-prometheus/kube-prometheus.libsonnet') +
  // ... all necessary mixins ...
  {
    _config+:: {
      // ... configuration for other features ...
      blackboxExporter+:: {
        modules+:: {
          tls_connect: {
            prober: 'tcp',
            tcp: {
              tls: true,
            },
          },
        },
      },
    },
  };

{ ['setup/0namespace-' + name]: kp.kubePrometheus[name] for name in std.objectFields(kp.kubePrometheus) } +
// ... other rendering blocks ...
{ ['blackbox-exporter-' + name]: kp.blackboxExporter[name] for name in std.objectFields(kp.blackboxExporter) }
```
Then describe the blackbox checks you want to run as `Probe` resources, setting `blackbox-exporter.<namespace>.svc.cluster.local:9115` as the `spec.prober.url` field.
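For instance, a minimal `Probe` sketch (the name and target below are illustrative, and `_config.namespace` is assumed to be `monitoring`) that uses the `tls_connect` module defined above:
```yaml
apiVersion: monitoring.coreos.com/v1
kind: Probe
metadata:
  name: example-tls-probe     # illustrative name
  namespace: monitoring       # assumes _config.namespace is 'monitoring'
spec:
  interval: 60s
  module: tls_connect         # the module added in the snippet above
  prober:
    url: blackbox-exporter.monitoring.svc.cluster.local:9115
  targets:
    staticConfig:
      static:
      - example.com:443       # illustrative TLS endpoint to probe
```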
See the [blackbox exporter guide](docs/blackbox-exporter.md) for the list of configurable options and a complete example.
## Minikube Example
For an easy-to-reproduce example, see [minikube.jsonnet](examples/minikube.jsonnet), which uses the minikube setup demonstrated in [Prerequisites](#prerequisites). Because we would like easy access to our Prometheus, Alertmanager, and Grafana UIs, `minikube.jsonnet` exposes the services as NodePort-type services.

docs/blackbox-exporter.md (new file, 82 lines)

@@ -0,0 +1,82 @@
# Setting up a blackbox exporter
The `prometheus-operator` defines a `Probe` resource type that can be used to describe blackbox checks. To execute these, a separate component called [`blackbox_exporter`](https://github.com/prometheus/blackbox_exporter) has to be deployed, which can be scraped to retrieve the results of these checks. You can use `kube-prometheus` to set up such a blackbox exporter within your Kubernetes cluster.
## Adding blackbox exporter manifests to an existing `kube-prometheus` configuration
1. Override blackbox-related configuration parameters as needed (a minimal sketch follows this list).
2. Add the following to the list of renderers to render the blackbox exporter manifests:
```jsonnet
{ ['blackbox-exporter-' + name]: kp.blackboxExporter[name] for name in std.objectFields(kp.blackboxExporter) }
```
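For step 1, a minimal sketch of such an override (the `replicas` value here is illustrative; any parameter documented below can be set the same way):
```jsonnet
local kp =
  (import 'kube-prometheus/kube-prometheus.libsonnet') +
  {
    _config+:: {
      blackboxExporter+:: {
        replicas: 2,  // illustrative: run two exporter replicas
      },
    },
  };
```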
## Configuration parameters influencing the blackbox exporter
* `_config.namespace`: the namespace where the various generated resources (`ConfigMap`, `Deployment`, `Service`, `ServiceAccount` and `ServiceMonitor`) will reside. This does not affect where you can place `Probe` objects; that is determined by the configuration of the `Prometheus` resource. This option is shared with other `kube-prometheus` components; defaults to `default`.
* `_config.imageRepos.blackboxExporter`: the name of the blackbox exporter image to deploy. Defaults to `quay.io/prometheus/blackbox-exporter`.
* `_config.versions.blackboxExporter`: the tag of the blackbox exporter image to deploy. Defaults to the version `kube-prometheus` was tested with.
* `_config.imageRepos.configmapReloader`: the name of the ConfigMap reloader image to deploy. Defaults to `jimmidyson/configmap-reload`.
* `_config.versions.configmapReloader`: the tag of the ConfigMap reloader image to deploy. Defaults to the version `kube-prometheus` was tested with.
* `_config.resources['blackbox-exporter'].requests`: the requested resources; applied to each container. Defaults to `10m` CPU and `20Mi` memory. See https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ for details.
* `_config.resources['blackbox-exporter'].limits`: the resource limits; applied to each container. Defaults to `20m` CPU and `40Mi` memory. See https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ for details.
* `_config.blackboxExporter.port`: the exposed HTTPS port of the exporter. This is where Prometheus should send the probe requests. Defaults to `9115`.
* `_config.blackboxExporter.internalPort`: the internal plaintext port of the exporter. Not accessible from outside the pod. Defaults to `19115`.
* `_config.blackboxExporter.replicas`: the number of exporter replicas to be deployed. Defaults to `1`.
* `_config.blackboxExporter.matchLabels`: map of labels used to select the resources belonging to the deployed instance. Defaults to `{ 'app.kubernetes.io/name': 'blackbox-exporter' }`.
* `_config.blackboxExporter.assignLabels`: map of labels applied to the components of the deployed instance. Defaults to all labels in the `matchLabels` option, plus `app.kubernetes.io/version` set to the version of the blackbox exporter.
* `_config.blackboxExporter.modules`: the modules available in the blackbox exporter installation, i.e. the types of checks it can perform. The default value includes most of the modules defined in the default blackbox exporter configuration: `http_2xx`, `http_post_2xx`, `tcp_connect`, `pop3s_banner`, `ssh_banner`, and `irc_banner`. `icmp` is omitted so the exporter can run with minimum privileges, but you can add it back if needed; see the example below. See https://github.com/prometheus/blackbox_exporter/blob/master/CONFIGURATION.md for the configuration format, except that the modules are written as Jsonnet objects rather than YAML here.
* `_config.blackboxExporter.privileged`: whether the `blackbox-exporter` container runs as non-root (`false`) or as root with a heavily restricted capability set (`true`). Defaults to `true` if any ICMP modules are defined (they need the extra permissions) and `false` otherwise.
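As a sketch, here is how the resource settings could be overridden (the values are illustrative); note the bracket notation required by the dash in the key:
```jsonnet
local kp =
  (import 'kube-prometheus/kube-prometheus.libsonnet') +
  {
    _config+:: {
      resources+:: {
        'blackbox-exporter': {
          // illustrative values, applied to each container of the deployment
          requests: { cpu: '20m', memory: '40Mi' },
          limits: { cpu: '40m', memory: '80Mi' },
        },
      },
    },
  };
```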
## Complete example
```jsonnet
local kp =
  (import 'kube-prometheus/kube-prometheus.libsonnet') +
  {
    _config+:: {
      namespace: 'monitoring',
      blackboxExporter+:: {
        modules+:: {
          icmp: {
            prober: 'icmp',
          },
        },
      },
    },
  };

{ ['setup/0namespace-' + name]: kp.kubePrometheus[name] for name in std.objectFields(kp.kubePrometheus) } +
{
  ['setup/prometheus-operator-' + name]: kp.prometheusOperator[name]
  for name in std.filter((function(name) name != 'serviceMonitor'), std.objectFields(kp.prometheusOperator))
} +
// serviceMonitor is separated so that it can be created after the CRDs are ready
{ 'prometheus-operator-serviceMonitor': kp.prometheusOperator.serviceMonitor } +
{ ['node-exporter-' + name]: kp.nodeExporter[name] for name in std.objectFields(kp.nodeExporter) } +
{ ['kube-state-metrics-' + name]: kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics) } +
{ ['blackbox-exporter-' + name]: kp.blackboxExporter[name] for name in std.objectFields(kp.blackboxExporter) } +
{ ['alertmanager-' + name]: kp.alertmanager[name] for name in std.objectFields(kp.alertmanager) } +
{ ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) } +
{ ['prometheus-adapter-' + name]: kp.prometheusAdapter[name] for name in std.objectFields(kp.prometheusAdapter) } +
{ ['grafana-' + name]: kp.grafana[name] for name in std.objectFields(kp.grafana) }
```
After installing the generated manifests, you can create `Probe` resources, for example:
```yaml
kind: Probe
apiVersion: monitoring.coreos.com/v1
metadata:
  name: example-com-website
  namespace: monitoring
spec:
  interval: 60s
  module: http_2xx
  prober:
    url: blackbox-exporter.monitoring.svc.cluster.local:9115
  targets:
    staticConfig:
      static:
      - http://example.com
      - https://example.com
```
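Note that whether a `Probe` object is picked up is governed by the `Prometheus` resource, as mentioned above. As a hedged sketch, assuming the kube-prometheus default `Prometheus` named `k8s`, the relevant fields look like this (empty selectors match `Probe` objects with any labels, in any namespace):
```yaml
apiVersion: monitoring.coreos.com/v1
kind: Prometheus
metadata:
  name: k8s                   # kube-prometheus default name; adjust to your setup
  namespace: monitoring
spec:
  probeSelector: {}           # empty selector matches Probes with any labels
  probeNamespaceSelector: {}  # empty selector matches all namespaces
```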

@@ -34,6 +34,7 @@ local kp =
// serviceMonitor is separated so that it can be created after the CRDs are ready
{ 'prometheus-operator-serviceMonitor': kp.prometheusOperator.serviceMonitor } +
{ ['node-exporter-' + name]: kp.nodeExporter[name] for name in std.objectFields(kp.nodeExporter) } +
{ ['blackbox-exporter-' + name]: kp.blackboxExporter[name] for name in std.objectFields(kp.blackboxExporter) } +
{ ['kube-state-metrics-' + name]: kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics) } +
{ ['alertmanager-' + name]: kp.alertmanager[name] for name in std.objectFields(kp.alertmanager) } +
{ ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) } +

@@ -22,6 +22,7 @@ local kp =
// serviceMonitor is separated so that it can be created after the CRDs are ready
{ 'prometheus-operator-serviceMonitor': kp.prometheusOperator.serviceMonitor } +
{ ['node-exporter-' + name]: kp.nodeExporter[name] for name in std.objectFields(kp.nodeExporter) } +
{ ['blackbox-exporter-' + name]: kp.blackboxExporter[name] for name in std.objectFields(kp.blackboxExporter) } +
{ ['kube-state-metrics-' + name]: kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics) } +
{ ['alertmanager-' + name]: kp.alertmanager[name] for name in std.objectFields(kp.alertmanager) } +
{ ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) } +

@@ -16,6 +16,7 @@ local manifests =
// serviceMonitor is separated so that it can be created after the CRDs are ready
{ 'prometheus-operator-serviceMonitor': kp.prometheusOperator.serviceMonitor } +
{ ['node-exporter-' + name]: kp.nodeExporter[name] for name in std.objectFields(kp.nodeExporter) } +
{ ['blackbox-exporter-' + name]: kp.blackboxExporter[name] for name in std.objectFields(kp.blackboxExporter) } +
{ ['kube-state-metrics-' + name]: kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics) } +
{ ['alertmanager-' + name]: kp.alertmanager[name] for name in std.objectFields(kp.alertmanager) } +
{ ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) } +

@@ -0,0 +1,275 @@
local kubeRbacProxyContainer = import '../kube-rbac-proxy/container.libsonnet';
{
  _config+:: {
    namespace: 'default',
    versions+:: {
      blackboxExporter: 'v0.18.0',
      configmapReloader: 'v0.4.0',
    },
    imageRepos+:: {
      blackboxExporter: 'quay.io/prometheus/blackbox-exporter',
      configmapReloader: 'jimmidyson/configmap-reload',
    },
    resources+:: {
      'blackbox-exporter': {
        requests: { cpu: '10m', memory: '20Mi' },
        limits: { cpu: '20m', memory: '40Mi' },
      },
    },
    blackboxExporter: {
      port: 9115,
      internalPort: 19115,
      replicas: 1,
      matchLabels: {
        'app.kubernetes.io/name': 'blackbox-exporter',
      },
      assignLabels: self.matchLabels {
        'app.kubernetes.io/version': $._config.versions.blackboxExporter,
      },
      modules: {
        http_2xx: {
          prober: 'http',
        },
        http_post_2xx: {
          prober: 'http',
          http: {
            method: 'POST',
          },
        },
        tcp_connect: {
          prober: 'tcp',
        },
        pop3s_banner: {
          prober: 'tcp',
          tcp: {
            query_response: [
              { expect: '^+OK' },
            ],
            tls: true,
            tls_config: {
              insecure_skip_verify: false,
            },
          },
        },
        ssh_banner: {
          prober: 'tcp',
          tcp: {
            query_response: [
              { expect: '^SSH-2.0-' },
            ],
          },
        },
        irc_banner: {
          prober: 'tcp',
          tcp: {
            query_response: [
              { send: 'NICK prober' },
              { send: 'USER prober prober prober :prober' },
              { expect: 'PING :([^ ]+)', send: 'PONG ${1}' },
              { expect: '^:[^ ]+ 001' },
            ],
          },
        },
      },
      privileged:
        local icmpModules = [self.modules[m] for m in std.objectFields(self.modules) if self.modules[m].prober == 'icmp'];
        std.length(icmpModules) > 0,
    },
  },
  blackboxExporter+::
    local bb = $._config.blackboxExporter;
    {
      configuration: {
        apiVersion: 'v1',
        kind: 'ConfigMap',
        metadata: {
          name: 'blackbox-exporter-configuration',
          namespace: $._config.namespace,
        },
        data: {
          'config.yml': std.manifestYamlDoc({ modules: bb.modules }),
        },
      },
      serviceAccount: {
        apiVersion: 'v1',
        kind: 'ServiceAccount',
        metadata: {
          name: 'blackbox-exporter',
          namespace: $._config.namespace,
        },
      },
      clusterRole: {
        apiVersion: 'rbac.authorization.k8s.io/v1',
        kind: 'ClusterRole',
        metadata: {
          name: 'blackbox-exporter',
        },
        rules: [
          {
            apiGroups: ['authentication.k8s.io'],
            resources: ['tokenreviews'],
            verbs: ['create'],
          },
          {
            apiGroups: ['authorization.k8s.io'],
            resources: ['subjectaccessreviews'],
            verbs: ['create'],
          },
        ],
      },
      clusterRoleBinding: {
        apiVersion: 'rbac.authorization.k8s.io/v1',
        kind: 'ClusterRoleBinding',
        metadata: {
          name: 'blackbox-exporter',
        },
        roleRef: {
          apiGroup: 'rbac.authorization.k8s.io',
          kind: 'ClusterRole',
          name: 'blackbox-exporter',
        },
        subjects: [{
          kind: 'ServiceAccount',
          name: 'blackbox-exporter',
          namespace: $._config.namespace,
        }],
      },
      deployment: {
        apiVersion: 'apps/v1',
        kind: 'Deployment',
        metadata: {
          name: 'blackbox-exporter',
          namespace: $._config.namespace,
          labels: bb.assignLabels,
        },
        spec: {
          replicas: bb.replicas,
          selector: { matchLabels: bb.matchLabels },
          template: {
            metadata: { labels: bb.assignLabels },
            spec: {
              containers: [
                {
                  name: 'blackbox-exporter',
                  image: $._config.imageRepos.blackboxExporter + ':' + $._config.versions.blackboxExporter,
                  args: [
                    '--config.file=/etc/blackbox_exporter/config.yml',
                    '--web.listen-address=:%d' % bb.internalPort,
                  ],
                  ports: [{
                    name: 'http',
                    containerPort: bb.internalPort,
                  }],
                  resources: {
                    requests: $._config.resources['blackbox-exporter'].requests,
                    limits: $._config.resources['blackbox-exporter'].limits,
                  },
                  securityContext: if bb.privileged then {
                    runAsNonRoot: false,
                    capabilities: { drop: ['ALL'], add: ['NET_RAW'] },
                  } else {
                    runAsNonRoot: true,
                    runAsUser: 65534,
                  },
                  volumeMounts: [{
                    mountPath: '/etc/blackbox_exporter/',
                    name: 'config',
                    readOnly: true,
                  }],
                },
                {
                  name: 'module-configmap-reloader',
                  image: $._config.imageRepos.configmapReloader + ':' + $._config.versions.configmapReloader,
                  args: [
                    '--webhook-url=http://localhost:%d/-/reload' % bb.internalPort,
                    '--volume-dir=/etc/blackbox_exporter/',
                  ],
                  resources: {
                    requests: $._config.resources['blackbox-exporter'].requests,
                    limits: $._config.resources['blackbox-exporter'].limits,
                  },
                  securityContext: { runAsNonRoot: true, runAsUser: 65534 },
                  terminationMessagePath: '/dev/termination-log',
                  terminationMessagePolicy: 'FallbackToLogsOnError',
                  volumeMounts: [{
                    mountPath: '/etc/blackbox_exporter/',
                    name: 'config',
                    readOnly: true,
                  }],
                },
              ],
              nodeSelector: { 'kubernetes.io/os': 'linux' },
              serviceAccountName: 'blackbox-exporter',
              volumes: [{
                name: 'config',
                configMap: { name: 'blackbox-exporter-configuration' },
              }],
            },
          },
        },
      },
      service: {
        apiVersion: 'v1',
        kind: 'Service',
        metadata: {
          name: 'blackbox-exporter',
          namespace: $._config.namespace,
          labels: bb.assignLabels,
        },
        spec: {
          ports: [{ name: 'http', port: bb.port, targetPort: 'https' }],
          selector: bb.matchLabels,
        },
      },
      serviceMonitor: {
        apiVersion: 'monitoring.coreos.com/v1',
        kind: 'ServiceMonitor',
        metadata: {
          name: 'blackbox-exporter',
          namespace: $._config.namespace,
          labels: bb.assignLabels,
        },
        spec: {
          endpoints: [{
            bearerTokenFile: '/var/run/secrets/kubernetes.io/serviceaccount/token',
            interval: '30s',
            path: '/metrics',
            port: 'http',
            scheme: 'https',
            tlsConfig: {
              insecureSkipVerify: true,
            },
          }],
          selector: {
            matchLabels: bb.matchLabels,
          },
        },
      },
    } +
    (kubeRbacProxyContainer {
      config+:: {
        kubeRbacProxy: {
          image: $._config.imageRepos.kubeRbacProxy + ':' + $._config.versions.kubeRbacProxy,
          name: 'kube-rbac-proxy',
          securePortName: 'https',
          securePort: bb.port,
          secureListenAddress: ':%d' % self.securePort,
          upstream: 'http://127.0.0.1:%d/' % bb.internalPort,
          tlsCipherSuites: $._config.tlsCipherSuites,
        },
      },
    }).deploymentMixin,
}

@@ -5,6 +5,7 @@ local kubeRbacProxyContainer = import './kube-rbac-proxy/container.libsonnet';
(import 'github.com/kubernetes/kube-state-metrics/jsonnet/kube-state-metrics-mixin/mixin.libsonnet') +
(import './node-exporter/node-exporter.libsonnet') +
(import 'github.com/prometheus/node_exporter/docs/node-mixin/mixin.libsonnet') +
(import './blackbox-exporter/blackbox-exporter.libsonnet') +
(import './alertmanager/alertmanager.libsonnet') +
(import 'github.com/prometheus/alertmanager/doc/alertmanager-mixin/mixin.libsonnet') +
(import 'github.com/prometheus-operator/prometheus-operator/jsonnet/prometheus-operator/prometheus-operator.libsonnet') +

@@ -6,6 +6,13 @@ resources:
- ./manifests/alertmanager-service.yaml
- ./manifests/alertmanager-serviceAccount.yaml
- ./manifests/alertmanager-serviceMonitor.yaml
- ./manifests/blackbox-exporter-clusterRole.yaml
- ./manifests/blackbox-exporter-clusterRoleBinding.yaml
- ./manifests/blackbox-exporter-configuration.yaml
- ./manifests/blackbox-exporter-deployment.yaml
- ./manifests/blackbox-exporter-service.yaml
- ./manifests/blackbox-exporter-serviceAccount.yaml
- ./manifests/blackbox-exporter-serviceMonitor.yaml
- ./manifests/grafana-dashboardDatasources.yaml
- ./manifests/grafana-dashboardDefinitions.yaml
- ./manifests/grafana-dashboardSources.yaml

@@ -0,0 +1,17 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: blackbox-exporter
rules:
- apiGroups:
  - authentication.k8s.io
  resources:
  - tokenreviews
  verbs:
  - create
- apiGroups:
  - authorization.k8s.io
  resources:
  - subjectaccessreviews
  verbs:
  - create

@@ -0,0 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: blackbox-exporter
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: blackbox-exporter
subjects:
- kind: ServiceAccount
  name: blackbox-exporter
  namespace: monitoring

@@ -0,0 +1,38 @@
apiVersion: v1
data:
  config.yml: |-
    "modules":
      "http_2xx":
        "prober": "http"
      "http_post_2xx":
        "http":
          "method": "POST"
        "prober": "http"
      "irc_banner":
        "prober": "tcp"
        "tcp":
          "query_response":
          - "send": "NICK prober"
          - "send": "USER prober prober prober :prober"
          - "expect": "PING :([^ ]+)"
            "send": "PONG ${1}"
          - "expect": "^:[^ ]+ 001"
      "pop3s_banner":
        "prober": "tcp"
        "tcp":
          "query_response":
          - "expect": "^+OK"
          "tls": true
          "tls_config":
            "insecure_skip_verify": false
      "ssh_banner":
        "prober": "tcp"
        "tcp":
          "query_response":
          - "expect": "^SSH-2.0-"
      "tcp_connect":
        "prober": "tcp"
kind: ConfigMap
metadata:
  name: blackbox-exporter-configuration
  namespace: monitoring

@@ -0,0 +1,84 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app.kubernetes.io/name: blackbox-exporter
    app.kubernetes.io/version: v0.18.0
  name: blackbox-exporter
  namespace: monitoring
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: blackbox-exporter
  template:
    metadata:
      labels:
        app.kubernetes.io/name: blackbox-exporter
        app.kubernetes.io/version: v0.18.0
    spec:
      containers:
      - args:
        - --config.file=/etc/blackbox_exporter/config.yml
        - --web.listen-address=:19115
        image: quay.io/prometheus/blackbox-exporter:v0.18.0
        name: blackbox-exporter
        ports:
        - containerPort: 19115
          name: http
        resources:
          limits:
            cpu: 20m
            memory: 40Mi
          requests:
            cpu: 10m
            memory: 20Mi
        securityContext:
          runAsNonRoot: true
          runAsUser: 65534
        volumeMounts:
        - mountPath: /etc/blackbox_exporter/
          name: config
          readOnly: true
      - args:
        - --webhook-url=http://localhost:19115/-/reload
        - --volume-dir=/etc/blackbox_exporter/
        image: jimmidyson/configmap-reload:v0.4.0
        name: module-configmap-reloader
        resources:
          limits:
            cpu: 20m
            memory: 40Mi
          requests:
            cpu: 10m
            memory: 20Mi
        securityContext:
          runAsNonRoot: true
          runAsUser: 65534
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: FallbackToLogsOnError
        volumeMounts:
        - mountPath: /etc/blackbox_exporter/
          name: config
          readOnly: true
      - args:
        - --logtostderr
        - --secure-listen-address=:9115
        - --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305
        - --upstream=http://127.0.0.1:19115/
        image: quay.io/brancz/kube-rbac-proxy:v0.8.0
        name: kube-rbac-proxy
        ports:
        - containerPort: 9115
          name: https
        securityContext:
          runAsGroup: 65532
          runAsNonRoot: true
          runAsUser: 65532
      nodeSelector:
        kubernetes.io/os: linux
      serviceAccountName: blackbox-exporter
      volumes:
      - configMap:
          name: blackbox-exporter-configuration
        name: config

@@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
  labels:
    app.kubernetes.io/name: blackbox-exporter
    app.kubernetes.io/version: v0.18.0
  name: blackbox-exporter
  namespace: monitoring
spec:
  ports:
  - name: http
    port: 9115
    targetPort: https
  selector:
    app.kubernetes.io/name: blackbox-exporter

@@ -0,0 +1,5 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: blackbox-exporter
  namespace: monitoring

@@ -0,0 +1,20 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  labels:
    app.kubernetes.io/name: blackbox-exporter
    app.kubernetes.io/version: v0.18.0
  name: blackbox-exporter
  namespace: monitoring
spec:
  endpoints:
  - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
    interval: 30s
    path: /metrics
    port: http
    scheme: https
    tlsConfig:
      insecureSkipVerify: true
  selector:
    matchLabels:
      app.kubernetes.io/name: blackbox-exporter