*: Generate latest Documentation and manifest for Prom v2.5.0

Matthias Loibl 2018-11-07 16:20:23 +01:00
parent 174e84f3e5
commit 2ffb7cffea
5 changed files with 33 additions and 34 deletions

View File

@@ -10,10 +10,10 @@ local kp =
       config: importstr 'alertmanager-config.yaml',
     },
     grafana+:: {
       config: { // http://docs.grafana.org/installation/configuration/
         sections: {
           // Do not require grafana users to login/authenticate
-          "auth.anonymous": {enabled: true},
+          'auth.anonymous': { enabled: true },
         },
       },
     },
@@ -27,13 +27,13 @@ local kp =
       // Reference info: https://coreos.com/operators/prometheus/docs/latest/api.html#prometheusspec
       spec+: {
         // An e.g. of the purpose of this is so the "Source" links on http://<alert-manager>/#/alerts are valid.
-        externalUrl: "http://192.168.99.100:30900",
+        externalUrl: 'http://192.168.99.100:30900',
         // Reference info: "external_labels" on https://prometheus.io/docs/prometheus/latest/configuration/configuration/
         externalLabels: {
           // This 'cluster' label will be included on every firing prometheus alert. (This is more useful
           // when running multiple clusters in a shared environment (e.g. AWS) with other users.)
-          cluster: "minikube-<INSERT YOUR USERNAME HERE>",
+          cluster: 'minikube-<INSERT YOUR USERNAME HERE>',
         },
       },
     },
@@ -42,9 +42,9 @@ local kp =
     alertmanager+: {
       // Reference info: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#alertmanagerspec
       spec+: {
-        externalUrl: "http://192.168.99.100:30903",
-        logLevel: "debug", // So firing alerts show up in log
+        externalUrl: 'http://192.168.99.100:30903',
+        logLevel: 'debug', // So firing alerts show up in log
       },
     },
   },
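The quote-style changes above come from running the jsonnet formatter over the examples. For context, a minimal sketch of how such an override file is consumed, assuming the standard kube-prometheus entrypoint of this era (the cluster label value here is a placeholder):

// Minimal sketch (assumes kube-prometheus.libsonnet is on the jsonnet path);
// mirrors the override pattern shown in the hunks above.
local kp = (import 'kube-prometheus/kube-prometheus.libsonnet') + {
  prometheus+:: {
    prometheus+: {
      spec+: {
        externalUrl: 'http://192.168.99.100:30900',
        externalLabels: { cluster: 'minikube-example' },  // placeholder value
      },
    },
  },
};

// Evaluating this field yields the patched Prometheus custom resource.
kp.prometheus.prometheus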

View File

@@ -1,10 +1,9 @@
 // Reference info: documentation for https://github.com/ksonnet/ksonnet-lib can be found at http://g.bryan.dev.hepti.center
 //
 local k = import 'ksonnet/ksonnet.beta.3/k.libsonnet'; // https://github.com/ksonnet/ksonnet-lib/blob/master/ksonnet.beta.3/k.libsonnet - imports k8s.libsonnet
 // * https://github.com/ksonnet/ksonnet-lib/blob/master/ksonnet.beta.3/k8s.libsonnet defines things such as "persistentVolumeClaim:: {"
 //
 local pvc = k.core.v1.persistentVolumeClaim; // https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#persistentvolumeclaim-v1-core (defines variable named 'spec' of type 'PersistentVolumeClaimSpec')
 local kp =
   (import 'kube-prometheus/kube-prometheus.libsonnet') +
@@ -16,20 +15,20 @@ local kp =
   prometheus+:: {
     prometheus+: {
       spec+: { // https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec
         // If a value isn't specified for 'retention', then by default the '--storage.tsdb.retention=24h' arg will be passed to prometheus by prometheus-operator.
         // The possible values for a prometheus <duration> are:
         // * https://github.com/prometheus/common/blob/c7de230/model/time.go#L178 specifies "^([0-9]+)(y|w|d|h|m|s|ms)$" (years weeks days hours minutes seconds milliseconds)
-        retention: "30d",
+        retention: '30d',
         // Reference info: https://github.com/coreos/prometheus-operator/blob/master/Documentation/user-guides/storage.md
         // By default (if the following 'storage.volumeClaimTemplate' isn't created), prometheus will be created with an EmptyDir for the 'prometheus-k8s-db' volume (for the prom tsdb).
         // This 'storage.volumeClaimTemplate' causes the following to be automatically created (via dynamic provisioning) for each prometheus pod:
         // * PersistentVolumeClaim (and a corresponding PersistentVolume)
         // * the actual volume (per the StorageClassName specified below)
         storage: { // https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#storagespec
           volumeClaimTemplate: // (same link as above where the 'pvc' variable is defined)
             pvc.new() + // http://g.bryan.dev.hepti.center/core/v1/persistentVolumeClaim/#core.v1.persistentVolumeClaim.new
             pvc.mixin.spec.withAccessModes('ReadWriteOnce') +
@@ -40,14 +39,14 @@ local kp =
             // A StorageClass of the following name (which can be seen via `kubectl get storageclass` from a node in the given K8s cluster) must exist prior to kube-prometheus being deployed.
             pvc.mixin.spec.withStorageClassName('ssd'),
             // The following 'selector' is only needed if you're using manual storage provisioning (https://github.com/coreos/prometheus-operator/blob/master/Documentation/user-guides/storage.md#manual-storage-provisioning).
             // And note that this is not supported/allowed by AWS - uncommenting the following 'selector' line (when deploying kube-prometheus to a K8s cluster in AWS) will cause the pvc to be stuck in the Pending status and have the following error:
             // * 'Failed to provision volume with StorageClass "ssd": claim.Spec.Selector is not supported for dynamic provisioning on AWS'
             //pvc.mixin.spec.selector.withMatchLabels({}),
         }, // storage
       }, // spec
     }, // prometheus
   }, // prometheus
 };
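As an aside, the pvc.new() + pvc.mixin.spec.with...() chain above is plain jsonnet object merging. A hand-written approximation of what the visible calls evaluate to (the diff elides the lines between the two hunks, e.g. any size request, so those are not reflected here):

// Hand-written approximation of the volumeClaimTemplate produced by the
// mixin calls visible above; not generated output.
{
  spec: {
    accessModes: ['ReadWriteOnce'],
    storageClassName: 'ssd',
  },
}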

View File

@@ -8,7 +8,7 @@
         "subdir": "contrib/kube-prometheus/jsonnet/kube-prometheus"
       }
     },
-    "version": "6e412f0b4727f3a7a6f097530294409baa6b520a"
+    "version": "fa0a0ae33a16a23845da8ab9973dd4eed50a20df"
   },
   {
     "name": "ksonnet",

View File

@@ -25,4 +25,4 @@ spec:
   serviceAccountName: prometheus-k8s
   serviceMonitorNamespaceSelector: {}
   serviceMonitorSelector: {}
-  version: v2.4.3
+  version: v2.5.0
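The version bump in this generated manifest traces back to the jsonnet config rather than a hand edit. A sketch of the usual override point, assuming the _config.versions convention kube-prometheus used at the time:

// Sketch: pinning the Prometheus version that flows into spec.version of the
// generated Prometheus custom resource (field names per the convention above).
local kp = (import 'kube-prometheus/kube-prometheus.libsonnet') + {
  _config+:: {
    versions+:: {
      prometheus: 'v2.5.0',
    },
  },
};
kp.prometheus.prometheus.spec.version  // evaluates to 'v2.5.0'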

View File

@@ -3,20 +3,20 @@ local l = import 'kube-prometheus/lib/lib.libsonnet';
 local config = kp._config;
 local makeImages(config) = [
   {
     name: config.imageRepos[image],
     tag: config.versions[image],
   }
   for image in std.objectFields(config.imageRepos)
 ];
 local upstreamImage(image) = '%s:%s' % [image.name, image.tag];
 local downstreamImage(registry, image) = '%s/%s:%s' % [registry, l.imageName(image.name), image.tag];
 local pullPush(image, newRegistry) = [
   'docker pull %s' % upstreamImage(image),
   'docker tag %s %s' % [upstreamImage(image), downstreamImage(newRegistry, image)],
   'docker push %s' % downstreamImage(newRegistry, image),
 ];
 local images = makeImages(config);
@@ -26,5 +26,5 @@ local output(repository) = std.flattenArrays([
   for image in images
 ]);
-function(repository="my-registry.com/repository")
+function(repository='my-registry.com/repository')
   std.join('\n', output(repository))
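Since this file evaluates to a function, it is typically rendered with a top-level argument, e.g. `jsonnet -S --tla-str repository=my-registry.com/repository <file>` (string output mode, since the result is a shell script). A self-contained toy version of the same pull/tag/push expansion, runnable on its own; the image name is a placeholder and the l.imageName() rewrite from the original is skipped:

// Toy version of the helpers above, with no external imports.
local upstreamImage(image) = '%s:%s' % [image.name, image.tag];
local downstreamImage(registry, image) = '%s/%s:%s' % [registry, image.name, image.tag];
local pullPush(image, newRegistry) = [
  'docker pull %s' % upstreamImage(image),
  'docker tag %s %s' % [upstreamImage(image), downstreamImage(newRegistry, image)],
  'docker push %s' % downstreamImage(newRegistry, image),
];

// Render with: jsonnet -S this-file.jsonnet
std.join('\n', pullPush({ name: 'quay.io/prometheus/prometheus', tag: 'v2.5.0' }, 'my-registry.com/repository'))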