diff --git a/docs/migration-example/my.release-0.3.jsonnet b/docs/migration-example/my.release-0.3.jsonnet
new file mode 100644
index 00000000..6de7c990
--- /dev/null
+++ b/docs/migration-example/my.release-0.3.jsonnet
@@ -0,0 +1,335 @@
+# Has the following customisations:
+# Custom alert manager config
+# Ingresses for the alert manager, prometheus and grafana
+# Grafana admin user password
+# Custom prometheus rules
+# Custom grafana dashboards
+# Custom prometheus config - Data retention, memory, etc.
+# Node exporter role and role binding so we can use a PSP for the node exporter
+
+
+
+# External variables
+# See https://jsonnet.org/learning/tutorial.html
+local cluster_identifier = std.extVar('cluster_identifier');
+local etcd_ip = std.extVar('etcd_ip');
+local etcd_tls_ca = std.extVar('etcd_tls_ca');
+local etcd_tls_cert = std.extVar('etcd_tls_cert');
+local etcd_tls_key = std.extVar('etcd_tls_key');
+local grafana_admin_password = std.extVar('grafana_admin_password');
+local prometheus_data_retention_period = std.extVar('prometheus_data_retention_period');
+local prometheus_request_memory = std.extVar('prometheus_request_memory');
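+# These values are supplied at build time via the jsonnet CLI; an illustrative (not prescriptive) invocation:
+#   jsonnet -J vendor --ext-str cluster_identifier=dev --ext-str etcd_ip=192.0.2.10 ... my.release-0.3.jsonnet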
+
+
+# Derived variables
+local alert_manager_host = 'alertmanager.' + cluster_identifier + '.myorg.local';
+local grafana_host = 'grafana.' + cluster_identifier + '.myorg.local';
+local prometheus_host = 'prometheus.' + cluster_identifier + '.myorg.local';
+
+
+# Imports
+local k = import 'ksonnet/ksonnet.beta.3/k.libsonnet';
+local ingress = k.extensions.v1beta1.ingress;
+local ingressRule = ingress.mixin.spec.rulesType;
+local ingressRuleHttpPath = ingressRule.mixin.http.pathsType;
+local ingressTls = ingress.mixin.spec.tlsType;
+local role = k.rbac.v1.role;
+local roleBinding = k.rbac.v1.roleBinding;
+local roleRulesType = k.rbac.v1.role.rulesType;
+
+
+local kp =
+ (import 'kube-prometheus/kube-prometheus.libsonnet') +
+ (import 'kube-prometheus/kube-prometheus-kubeadm.libsonnet') +
+ (import 'kube-prometheus/kube-prometheus-static-etcd.libsonnet') +
+
+ {
+ _config+:: {
+ # Override namespace
+ namespace: 'monitoring',
+
+
+
+
+
+
+
+
+ # Override alert manager config
+ # See https://github.com/coreos/kube-prometheus/tree/master/examples/alertmanager-config-external.jsonnet
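+    # importstr inlines the referenced file verbatim as a string; alertmanager.yaml is assumed to sit next to this jsonnet file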
+ alertmanager+: {
+ config: importstr 'alertmanager.yaml',
+ },
+
+ # Override etcd config
+ # See https://github.com/coreos/kube-prometheus/blob/master/jsonnet/kube-prometheus/kube-prometheus-static-etcd.libsonnet
+ # See https://github.com/coreos/kube-prometheus/blob/master/examples/etcd-skip-verify.jsonnet
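+    # The etcd_tls_* variables are expected to carry the PEM contents of the CA, client cert and key, not file paths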
+ etcd+:: {
+ clientCA: etcd_tls_ca,
+ clientCert: etcd_tls_cert,
+ clientKey: etcd_tls_key,
+ ips: [ etcd_ip ],
+ },
+
+ # Override grafana config
+ # anonymous access
+ # See http://docs.grafana.org/installation/configuration/
+ # See http://docs.grafana.org/auth/overview/#anonymous-authentication
+ # admin_password
+ # See http://docs.grafana.org/installation/configuration/#admin-password
+ grafana+:: {
+ config: {
+ sections: {
+ 'auth.anonymous': {
+ enabled: true
+ },
+ security: {
+ admin_password: grafana_admin_password
+ },
+ },
+ },
+
+
+
+ },
+ },
+
+ # Additional grafana dashboards
+ grafanaDashboards+:: {
+ 'my-specific.json': (import 'my-grafana-dashboard-definitions.json'),
+ },
+
+ # Alert manager needs an externalUrl
+ alertmanager+:: {
+ alertmanager+: {
+ spec+: {
+ # See https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md
+ # See https://github.com/coreos/prometheus-operator/blob/master/Documentation/user-guides/exposing-prometheus-and-alertmanager.md
+ externalUrl: 'https://' + alert_manager_host,
+ },
+ },
+ },
+
+
+ # Add additional ingresses
+ # See https://github.com/coreos/kube-prometheus/tree/master/examples/ingress.jsonnet
+ ingress+:: {
+ alertmanager:
+ ingress.new() +
+
+
+ ingress.mixin.metadata.withName('alertmanager') +
+ ingress.mixin.metadata.withNamespace($._config.namespace) +
+ ingress.mixin.metadata.withAnnotations({
+ 'kubernetes.io/ingress.class': 'nginx-api',
+ }) +
+
+ ingress.mixin.spec.withRules(
+ ingressRule.new() +
+ ingressRule.withHost(alert_manager_host) +
+ ingressRule.mixin.http.withPaths(
+ ingressRuleHttpPath.new() +
+
+
+
+
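+        # 'alertmanager-operated' is the headless service that the prometheus-operator creates for the Alertmanager pods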
+ ingressRuleHttpPath.mixin.backend.withServiceName('alertmanager-operated') +
+
+ ingressRuleHttpPath.mixin.backend.withServicePort(9093)
+ ),
+ ) +
+
+
+ # Note we do not need a TLS secretName here as we are going to use the nginx-ingress default secret which is a wildcard
+ # secretName would need to be in the same namespace at this time, see https://github.com/kubernetes/ingress-nginx/issues/2371
+ ingress.mixin.spec.withTls(
+ ingressTls.new() +
+ ingressTls.withHosts(alert_manager_host)
+ ),
+
+
+ grafana:
+ ingress.new() +
+
+
+ ingress.mixin.metadata.withName('grafana') +
+ ingress.mixin.metadata.withNamespace($._config.namespace) +
+ ingress.mixin.metadata.withAnnotations({
+ 'kubernetes.io/ingress.class': 'nginx-api',
+ }) +
+
+ ingress.mixin.spec.withRules(
+ ingressRule.new() +
+ ingressRule.withHost(grafana_host) +
+ ingressRule.mixin.http.withPaths(
+ ingressRuleHttpPath.new() +
+
+
+
+
+ ingressRuleHttpPath.mixin.backend.withServiceName('grafana') +
+
+ ingressRuleHttpPath.mixin.backend.withServicePort(3000)
+ ),
+ ) +
+
+
+ # Note we do not need a TLS secretName here as we are going to use the nginx-ingress default secret which is a wildcard
+ # secretName would need to be in the same namespace at this time, see https://github.com/kubernetes/ingress-nginx/issues/2371
+ ingress.mixin.spec.withTls(
+ ingressTls.new() +
+ ingressTls.withHosts(grafana_host)
+ ),
+
+
+ prometheus:
+ ingress.new() +
+
+
+ ingress.mixin.metadata.withName('prometheus') +
+ ingress.mixin.metadata.withNamespace($._config.namespace) +
+ ingress.mixin.metadata.withAnnotations({
+ 'kubernetes.io/ingress.class': 'nginx-api',
+ }) +
+ ingress.mixin.spec.withRules(
+ ingressRule.new() +
+
+ ingressRule.withHost(prometheus_host) +
+ ingressRule.mixin.http.withPaths(
+ ingressRuleHttpPath.new() +
+
+
+
+
+ ingressRuleHttpPath.mixin.backend.withServiceName('prometheus-operated') +
+
+ ingressRuleHttpPath.mixin.backend.withServicePort(9090)
+ ),
+ ) +
+
+
+ # Note we do not need a TLS secretName here as we are going to use the nginx-ingress default secret which is a wildcard
+ # secretName would need to be in the same namespace at this time, see https://github.com/kubernetes/ingress-nginx/issues/2371
+ ingress.mixin.spec.withTls(
+ ingressTls.new() +
+ ingressTls.withHosts(prometheus_host)
+ ),
+ },
+
+
+ # Node exporter PSP role and role binding
+  # Add a new top level field for this; the "node-exporter" PSP itself already exists, so it is referenced here rather than defined
+ # See https://github.com/coreos/prometheus-operator/issues/787
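+  # The role grants only the 'use' verb on the existing 'node-exporter' PSP; the binding attaches it to the node-exporter ServiceAccount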
+ nodeExporterPSP: {
+ role:
+ role.new() +
+
+
+ role.mixin.metadata.withName('node-exporter-psp') +
+ role.mixin.metadata.withNamespace($._config.namespace) +
+ role.withRules([
+ roleRulesType.new() +
+ roleRulesType.withApiGroups(['policy']) +
+ roleRulesType.withResources(['podsecuritypolicies']) +
+ roleRulesType.withVerbs(['use']) +
+ roleRulesType.withResourceNames(['node-exporter'])
+ ]),
+
+ roleBinding:
+ roleBinding.new() +
+ roleBinding.mixin.roleRef.withApiGroup('rbac.authorization.k8s.io') +
+
+
+ roleBinding.mixin.metadata.withName('node-exporter-psp') +
+ roleBinding.mixin.metadata.withNamespace($._config.namespace) +
+
+
+
+ roleBinding.mixin.roleRef.withName('node-exporter-psp') +
+ roleBinding.mixin.roleRef.mixinInstance({ kind: 'Role' }) +
+
+
+ roleBinding.withSubjects([{ kind: 'ServiceAccount', name: 'node-exporter' }]),
+
+
+ },
+
+
+ # Prometheus needs some extra custom config
+ prometheus+:: {
+ prometheus+: {
+ spec+: {
+ # See https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec
+ externalLabels: {
+ cluster: cluster_identifier,
+ },
+ # See https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md
+ # See https://github.com/coreos/prometheus-operator/blob/master/Documentation/user-guides/exposing-prometheus-and-alertmanager.md
+ externalUrl: 'https://' + prometheus_host,
+        # Override request memory
+ resources: {
+ requests: {
+ memory: prometheus_request_memory,
+ },
+ },
+ # Override data retention period
+ retention: prometheus_data_retention_period,
+ },
+ },
+ },
+
+
+ # Additional prometheus rules
+  # See https://github.com/coreos/kube-prometheus/blob/master/docs/developing-prometheus-rules-and-grafana-dashboards.md
+ # cat my-prometheus-rules.yaml | gojsontoyaml -yamltojson | jq . > my-prometheus-rules.json
+ prometheusRules+:: {
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ groups+: import 'my-prometheus-rules.json',
+
+
+ },
+ };
+
+
+# Render
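+# Each top-level field below becomes one output file when rendered in jsonnet multi-file mode,
+# e.g. jsonnet -J vendor -m manifests my.release-0.3.jsonnet (file names are taken from the object keys)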
+{ ['00namespace-' + name]: kp.kubePrometheus[name] for name in std.objectFields(kp.kubePrometheus) } +
+
+
+
+
+
+{ ['0prometheus-operator-' + name]: kp.prometheusOperator[name] for name in std.objectFields(kp.prometheusOperator) } +
+
+
+{ ['alertmanager-' + name]: kp.alertmanager[name] for name in std.objectFields(kp.alertmanager) } +
+
+{ ['grafana-' + name]: kp.grafana[name] for name in std.objectFields(kp.grafana) } +
+{ ['kube-state-metrics-' + name]: kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics) } +
+
+{ [name + '-ingress']: kp.ingress[name] for name in std.objectFields(kp.ingress) } +
+{ ['node-exporter-' + name]: kp.nodeExporter[name] for name in std.objectFields(kp.nodeExporter) } +
+{ ['node-exporter-psp-' + name]: kp.nodeExporterPSP[name] for name in std.objectFields(kp.nodeExporterPSP) } +
+{ ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) } +
+{ ['prometheus-adapter-' + name]: kp.prometheusAdapter[name] for name in std.objectFields(kp.prometheusAdapter) }
+
diff --git a/docs/migration-example/my.release-0.8.jsonnet b/docs/migration-example/my.release-0.8.jsonnet
new file mode 100644
index 00000000..2902590f
--- /dev/null
+++ b/docs/migration-example/my.release-0.8.jsonnet
@@ -0,0 +1,331 @@
+# Has the following customisations:
+# Custom alert manager config
+# Ingresses for the alert manager, prometheus and grafana
+# Grafana admin user password
+# Custom prometheus rules
+# Custom grafana dashboards
+# Custom prometheus config - Data retention, memory, etc.
+# Node exporter role and role binding so we can use a PSP for the node exporter
+
+# For help with expected content, see https://github.com/thaum-xyz/ankhmorpork
+
+# External variables
+# See https://jsonnet.org/learning/tutorial.html
+local cluster_identifier = std.extVar('cluster_identifier');
+local etcd_ip = std.extVar('etcd_ip');
+local etcd_tls_ca = std.extVar('etcd_tls_ca');
+local etcd_tls_cert = std.extVar('etcd_tls_cert');
+local etcd_tls_key = std.extVar('etcd_tls_key');
+local grafana_admin_password = std.extVar('grafana_admin_password');
+local prometheus_data_retention_period = std.extVar('prometheus_data_retention_period');
+local prometheus_request_memory = std.extVar('prometheus_request_memory');
+
+
+# Derived variables
+local alert_manager_host = 'alertmanager.' + cluster_identifier + '.myorg.local';
+local grafana_host = 'grafana.' + cluster_identifier + '.myorg.local';
+local prometheus_host = 'prometheus.' + cluster_identifier + '.myorg.local';
+
+
+# ksonnet no longer required
+
+
+
+
+
+
+
+
+
+
+local kp =
+ (import 'kube-prometheus/main.libsonnet') +
+ # kubeadm now achieved by setting platform value - see 9 lines below
+ (import 'kube-prometheus/addons/static-etcd.libsonnet') +
+ (import 'kube-prometheus/addons/podsecuritypolicies.libsonnet') +
+ {
+ values+:: {
+ common+: {
+ namespace: 'monitoring',
+ },
+
+ # Add kubeadm platform-specific items,
+    # including kube-controller-manager and kube-scheduler discovery
+ kubePrometheus+: {
+ platform: 'kubeadm'
+ },
+
+ # Override alert manager config
+ # See https://github.com/prometheus-operator/kube-prometheus/blob/main/examples/alertmanager-config-external.jsonnet
+ alertmanager+: {
+ config: importstr 'alertmanager.yaml',
+ },
+
+ # Override etcd config
+ # See https://github.com/prometheus-operator/kube-prometheus/blob/main/jsonnet/kube-prometheus/addons/static-etcd.libsonnet
+ # See https://github.com/prometheus-operator/kube-prometheus/blob/main/examples/etcd-skip-verify.jsonnet
+ etcd+:: {
+ clientCA: etcd_tls_ca,
+ clientCert: etcd_tls_cert,
+ clientKey: etcd_tls_key,
+ ips: [ etcd_ip ],
+ },
+
+ # Override grafana config
+ # anonymous access
+ # See http://docs.grafana.org/installation/configuration/
+ # See http://docs.grafana.org/auth/overview/#anonymous-authentication
+ # admin_password
+ # See http://docs.grafana.org/installation/configuration/#admin-password
+ grafana+:: {
+ config: {
+ sections: {
+ 'auth.anonymous': {
+ enabled: true
+ },
+ security: {
+ admin_password: grafana_admin_password
+ },
+ },
+ },
+ # Additional grafana dashboards
+ dashboards+:: {
+ 'my-specific.json': (import 'my-grafana-dashboard-definitions.json'),
+ }
+ },
+ },
+
+
+
+
+
+ # Alert manager needs an externalUrl
+ alertmanager+:: {
+ alertmanager+: {
+ spec+: {
+
+ # See https://github.com/prometheus-operator/kube-prometheus/blob/main/docs/exposing-prometheus-alertmanager-grafana-ingress.md
+ externalUrl: 'https://' + alert_manager_host,
+ },
+ },
+ },
+
+
+ # Add additional ingresses
+ # See https://github.com/prometheus-operator/kube-prometheus/blob/main/examples/ingress.jsonnet
+ ingress+:: {
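+    # With ksonnet gone, each ingress is now written as a plain networking.k8s.io/v1 manifest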
+ 'alertmanager': {
+ apiVersion: 'networking.k8s.io/v1',
+ kind: 'Ingress',
+ metadata: {
+ name: 'alertmanager',
+ namespace: $.values.common.namespace,
+ annotations: {
+ 'kubernetes.io/ingress.class': 'nginx-api',
+ },
+ },
+ spec: {
+ rules: [{
+ host: alert_manager_host,
+ http: {
+ paths: [{
+ path: '/',
+ pathType: 'Prefix',
+ backend: {
+ service: {
+ name: 'alertmanager-operated',
+ port: {
+ number: 9093,
+ },
+ },
+ },
+ }],
+ },
+ }],
+ tls: [{
+
+ hosts: [alert_manager_host],
+ }],
+ },
+ },
+ 'grafana': {
+ apiVersion: 'networking.k8s.io/v1',
+ kind: 'Ingress',
+ metadata: {
+ name: 'grafana',
+ namespace: $.values.common.namespace,
+ annotations: {
+ 'kubernetes.io/ingress.class': 'nginx-api',
+ },
+ },
+ spec: {
+ rules: [{
+ host: grafana_host,
+ http: {
+ paths: [{
+ path: '/',
+ pathType: 'Prefix',
+ backend: {
+ service: {
+ name: 'grafana',
+ port: {
+ number: 3000,
+ },
+ },
+ },
+ }],
+ },
+ }],
+ tls: [{
+
+ hosts: [grafana_host],
+ }],
+ },
+ },
+ 'prometheus': {
+ apiVersion: 'networking.k8s.io/v1',
+ kind: 'Ingress',
+ metadata: {
+ name: 'prometheus',
+ namespace: $.values.common.namespace,
+ annotations: {
+ 'kubernetes.io/ingress.class': 'nginx-api',
+ },
+ },
+ spec: {
+ rules: [{
+ host: prometheus_host,
+ http: {
+ paths: [{
+ path: '/',
+ pathType: 'Prefix',
+ backend: {
+ service: {
+ name: 'prometheus-operated',
+ port: {
+ number: 9090,
+ },
+ },
+ },
+ }],
+ },
+ }],
+ tls: [{
+
+ hosts: [prometheus_host],
+ }],
+ },
+ },
+ },
+
+
+ # Node exporter PSP role and role binding
+ nodeExporter+: {
+ 'psp-role'+: {
+ apiVersion: 'rbac.authorization.k8s.io/v1',
+ kind: 'Role',
+ metadata: {
+ name: 'node-exporter-psp',
+ namespace: $.values.common.namespace,
+ },
+ rules: [{
+ apiGroups: ['policy'],
+ resources: ['podsecuritypolicies'],
+        verbs: ['use'],
+        resourceNames: ['node-exporter'],
+ }],
+ },
+ 'psp-rolebinding'+: {
+
+ apiVersion: 'rbac.authorization.k8s.io/v1',
+ kind: 'RoleBinding',
+ metadata: {
+ name: 'node-exporter-psp',
+ namespace: $.values.common.namespace,
+ },
+ roleRef: {
+ apiGroup: 'rbac.authorization.k8s.io',
+ name: 'node-exporter-psp',
+ kind: 'Role',
+ },
+ subjects: [{
+ kind: 'ServiceAccount',
+ name: 'node-exporter'
+ }],
+ },
+ },
+
+ # Prometheus needs some extra custom config
+ prometheus+:: {
+ prometheus+: {
+ spec+: {
+
+ externalLabels: {
+ cluster: cluster_identifier,
+ },
+
+ # See https://github.com/prometheus-operator/kube-prometheus/blob/main/docs/exposing-prometheus-alertmanager-grafana-ingress.md
+ externalUrl: 'https://' + prometheus_host,
+        # Override request memory
+ resources: {
+ requests: {
+ memory: prometheus_request_memory,
+ },
+ },
+ # Override data retention period
+ retention: prometheus_data_retention_period,
+ },
+ },
+ },
+
+
+ # Additional prometheus rules
+ # See https://github.com/prometheus-operator/kube-prometheus/blob/main/docs/developing-prometheus-rules-and-grafana-dashboards.md#pre-rendered-rules
+ # cat my-prometheus-rules.yaml | gojsontoyaml -yamltojson | jq . > my-prometheus-rules.json
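+  # Custom rules are now wrapped in a PrometheusRule custom resource rather than patched into prometheusRules+::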
+ prometheusMe: {
+ rules: {
+ apiVersion: 'monitoring.coreos.com/v1',
+ kind: 'PrometheusRule',
+ metadata: {
+ name: 'my-prometheus-rule',
+ namespace: $.values.common.namespace,
+ labels: {
+ 'app.kubernetes.io/name': 'kube-prometheus',
+ 'app.kubernetes.io/part-of': 'kube-prometheus',
+ 'prometheus': 'k8s',
+ 'role': 'alert-rules'
+ },
+ },
+ spec: {
+ groups: import 'my-prometheus-rules.json',
+ },
+ },
+ },
+ };
+
+
+# Render
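+# Keys prefixed 'setup/' are rendered into a separate setup directory (pre-created by the build script)
+# so the namespace, CRDs and operator can be applied before the remaining manifests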
+{ 'setup/0namespace-namespace': kp.kubePrometheus.namespace } +
+{
+ ['setup/prometheus-operator-' + name]: kp.prometheusOperator[name]
+ for name in std.filter((function(name) name != 'serviceMonitor' && name != 'prometheusRule'), std.objectFields(kp.prometheusOperator))
+} +
+// serviceMonitor and prometheusRule are separated so that they can be created after the CRDs are ready
+{ 'prometheus-operator-serviceMonitor': kp.prometheusOperator.serviceMonitor } +
+{ 'prometheus-operator-prometheusRule': kp.prometheusOperator.prometheusRule } +
+{ 'kube-prometheus-prometheusRule': kp.kubePrometheus.prometheusRule } +
+{ ['alertmanager-' + name]: kp.alertmanager[name] for name in std.objectFields(kp.alertmanager) } +
+{ ['blackbox-exporter-' + name]: kp.blackboxExporter[name] for name in std.objectFields(kp.blackboxExporter) } +
+{ ['grafana-' + name]: kp.grafana[name] for name in std.objectFields(kp.grafana) } +
+{ ['kube-state-metrics-' + name]: kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics) } +
+{ ['kubernetes-' + name]: kp.kubernetesControlPlane[name] for name in std.objectFields(kp.kubernetesControlPlane) } +
+{ [name + '-ingress']: kp.ingress[name] for name in std.objectFields(kp.ingress) } +
+{ ['node-exporter-' + name]: kp.nodeExporter[name] for name in std.objectFields(kp.nodeExporter) } +
+
+{ ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) } +
+{ ['prometheus-adapter-' + name]: kp.prometheusAdapter[name] for name in std.objectFields(kp.prometheusAdapter) }
++ { ['prometheus-my-' + name]: kp.prometheusMe[name] for name in std.objectFields(kp.prometheusMe) }
diff --git a/docs/migration-example/readme.md b/docs/migration-example/readme.md
new file mode 100644
index 00000000..0db29d74
--- /dev/null
+++ b/docs/migration-example/readme.md
@@ -0,0 +1,250 @@
+## Example of conversion of a legacy my.jsonnet file
+
+An example conversion of a legacy custom jsonnet file to the
+release-0.8 format can be seen by comparing this
+[release-0.3 jsonnet file](./my.release-0.3.jsonnet) (from when the
+github repo still lived under `https://github.com/coreos/kube-prometheus...`)
+with the corresponding [release-0.8 jsonnet file](./my.release-0.8.jsonnet).
+
+Blank lines have been added to both files where necessary so that they
+can be compared side by side, line by line, on screen.
+
+The conversion covers both the move away from ksonnet after
+release-0.3 and the major restructuring after release-0.7, as described in
+[migration-guide.md](../migration-guide.md).
+
+The sample files are intended as an example of the format conversion,
+not necessarily as best practice for release-0.3 or release-0.8.
+
+Below are three sample extracts of the conversion as an indication of the
+changes required.
+
+
+
+
+
+
+
+**release-0.3**
+```jsonnet
+local kp =
+ (import 'kube-prometheus/kube-prometheus.libsonnet') +
+ (import 'kube-prometheus/kube-prometheus-kubeadm.libsonnet') +
+ (import 'kube-prometheus/kube-prometheus-static-etcd.libsonnet') +
+
+ {
+ _config+:: {
+ # Override namespace
+ namespace: 'monitoring',
+
+
+
+
+
+
+
+```
+
+
+
+**release-0.8**
+```jsonnet
+local kp =
+ (import 'kube-prometheus/main.libsonnet') +
+ # kubeadm now achieved by setting platform value - see 9 lines below
+ (import 'kube-prometheus/addons/static-etcd.libsonnet') +
+ (import 'kube-prometheus/addons/podsecuritypolicies.libsonnet') +
+ {
+ values+:: {
+ common+: {
+ namespace: 'monitoring',
+ },
+
+ # Add kubeadm platform-specific items,
+    # including kube-controller-manager and kube-scheduler discovery
+ kubePrometheus+: {
+ platform: 'kubeadm'
+ },
+```
+
+
+
+
+
+
+
+
+
+
+
+**release-0.3**
+```jsonnet
+ # Add additional ingresses
+ # See https://github.com/coreos/kube-prometheus/...
+ # tree/master/examples/ingress.jsonnet
+ ingress+:: {
+ alertmanager:
+ ingress.new() +
+
+
+ ingress.mixin.metadata.withName('alertmanager') +
+ ingress.mixin.metadata.withNamespace($._config.namespace) +
+ ingress.mixin.metadata.withAnnotations({
+ 'kubernetes.io/ingress.class': 'nginx-api',
+ }) +
+
+ ingress.mixin.spec.withRules(
+ ingressRule.new() +
+ ingressRule.withHost(alert_manager_host) +
+ ingressRule.mixin.http.withPaths(
+ ingressRuleHttpPath.new() +
+
+
+
+
+ ingressRuleHttpPath.mixin.backend
+ .withServiceName('alertmanager-operated') +
+ ingressRuleHttpPath.mixin.backend.withServicePort(9093)
+ ),
+ ) +
+ # Note we do not need a TLS secretName here as we are going to use the
+ # nginx-ingress default secret which is a wildcard
+ # secretName would need to be in the same namespace at this time,
+ # see https://github.com/kubernetes/ingress-nginx/issues/2371
+ ingress.mixin.spec.withTls(
+ ingressTls.new() +
+ ingressTls.withHosts(alert_manager_host)
+ ),
+
+
+```
+
+
+
+**release-0.8**
+```jsonnet
+ # Add additional ingresses
+ # See https://github.com/prometheus-operator/kube-prometheus/...
+ # blob/main/examples/ingress.jsonnet
+ ingress+:: {
+ 'alertmanager': {
+ apiVersion: 'networking.k8s.io/v1',
+ kind: 'Ingress',
+ metadata: {
+ name: 'alertmanager',
+ namespace: $.values.common.namespace,
+ annotations: {
+ 'kubernetes.io/ingress.class': 'nginx-api',
+ },
+ },
+ spec: {
+ rules: [{
+ host: alert_manager_host,
+ http: {
+ paths: [{
+ path: '/',
+ pathType: 'Prefix',
+ backend: {
+ service: {
+ name: 'alertmanager-operated',
+ port: {
+ number: 9093,
+ },
+ },
+ },
+ }],
+ },
+ }],
+ tls: [{
+
+ hosts: [alert_manager_host],
+ }],
+ },
+ },
+```
+
+
+
+
+
+
+
+
+
+
+
+**release-0.3**
+```jsonnet
+ # Additional prometheus rules
+  # See https://github.com/coreos/kube-prometheus/blob/master/docs/...
+ # developing-prometheus-rules-and-grafana-dashboards.md
+ #
+ # cat my-prometheus-rules.yaml | \
+ # gojsontoyaml -yamltojson | \
+ # jq . > my-prometheus-rules.json
+ prometheusRules+:: {
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ groups+: import 'my-prometheus-rules.json',
+
+
+ },
+ };
+
+
+
+
+```
+
+
+
+**release-0.8**
+```jsonnet
+ # Additional prometheus rules
+ # See https://github.com/prometheus-operator/kube-prometheus/blob/main/...
+ # docs/developing-prometheus-rules-and-grafana-dashboards.md...
+ # #pre-rendered-rules
+ # cat my-prometheus-rules.yaml | \
+ # gojsontoyaml -yamltojson | \
+ # jq . > my-prometheus-rules.json
+ prometheusMe: {
+ rules: {
+ apiVersion: 'monitoring.coreos.com/v1',
+ kind: 'PrometheusRule',
+ metadata: {
+ name: 'my-prometheus-rule',
+ namespace: $.values.common.namespace,
+ labels: {
+ 'app.kubernetes.io/name': 'kube-prometheus',
+ 'app.kubernetes.io/part-of': 'kube-prometheus',
+ 'prometheus': 'k8s',
+ 'role': 'alert-rules'
+ },
+ },
+ spec: {
+ groups: import 'my-prometheus-rules.json',
+ },
+ },
+ },
+ };
+
+...
+
++ { ['prometheus-my-' + name]: kp.prometheusMe[name] for name in std.objectFields(kp.prometheusMe) }
+```
+
+
+
+
diff --git a/docs/migration-guide.md b/docs/migration-guide.md
index f01434cf..a33a8b61 100644
--- a/docs/migration-guide.md
+++ b/docs/migration-guide.md
@@ -61,6 +61,10 @@ This results in creating multiple `PrometheusRule` objects instead of having one
All examples from `examples/` directory were adapted to the new codebase. [Please take a look at them for guideance](https://github.com/prometheus-operator/kube-prometheus/tree/main/examples)
+## Legacy migration
+
+An example conversion of a legacy release-0.3 `my.jsonnet` file to release-0.8 can be found in [migration-example](./migration-example).
+
## Advanced usage examples
For more advanced usage examples you can take a look at those two, open to public, implementations: