Merge branch 'master' into oci-auth-instance-principal
commit 02b7ffa324
.github/dependabot.yml (vendored, new file, 11 lines)
@@ -0,0 +1,11 @@
# To get started with Dependabot version updates, you'll need to specify which
# package ecosystems to update and where the package manifests are located.
# Please see the documentation for all configuration options:
# https://help.github.com/github/administering-a-repository/configuration-options-for-dependency-updates

version: 2
updates:
  - package-ecosystem: "gomod" # See documentation for possible values
    directory: "/" # Location of package manifests
    schedule:
      interval: "daily"
.github/workflows/codeql-analysis.yml (vendored, new file, 49 lines)
@@ -0,0 +1,49 @@
name: "CodeQL analysis"

on:
  push:
    branches: [ master ]
  pull_request:
    # The branches below must be a subset of the branches above
    branches: [ master ]
  schedule:
    - cron: '35 13 * * 5'
  workflow_dispatch:

jobs:
  analyze:
    name: Analyze
    runs-on: ubuntu-latest
    permissions:
      actions: read
      contents: read
      security-events: write

    strategy:
      fail-fast: false
      matrix:
        language: [ 'go' ]

    steps:
      - name: Checkout repository
        uses: actions/checkout@v2

      - name: Install go version
        uses: actions/setup-go@v2
        with:
          go-version: '^1.16'

      # Initializes the CodeQL tools for scanning.
      - name: Initialize CodeQL
        uses: github/codeql-action/init@v1
        with:
          languages: ${{ matrix.language }}
          # If you wish to specify custom queries, you can do so here or in a config file.
          # By default, queries listed here will override any specified in a config file.
          # Prefix the list here with "+" to use these queries and those in the config file.
          # queries: ./path/to/local/query, your-org/your-repo/queries@main

      - run: |
          make build

      - name: Perform CodeQL Analysis
        uses: github/codeql-action/analyze@v1
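For debugging this workflow locally, a rough equivalent using the CodeQL CLI might look like the sketch below; the database path and the `codeql/go-queries` pack are illustrative assumptions, not something this workflow pins.

```shell
# Build a CodeQL database for the Go sources, reusing the workflow's build command.
codeql database create /tmp/codeql-db --language=go --command='make build'
# Analyze with the standard Go query pack (assumed) and emit SARIF, as the Action does.
codeql database analyze /tmp/codeql-db codeql/go-queries \
  --format=sarif-latest --output=results.sarif
```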
.github/workflows/lint-test-chart.yaml (vendored, new file, 49 lines)
@@ -0,0 +1,49 @@
name: Lint and Test Chart

on:
  pull_request:
    paths:
      - "charts/external-dns/**"

jobs:
  lint-test:
    if: github.repository == 'kubernetes-sigs/external-dns'
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v2
        with:
          fetch-depth: 0

      - name: Set up Helm
        uses: azure/setup-helm@v1
        with:
          version: v3.6.3

      - name: Set up Python
        uses: actions/setup-python@v2
        with:
          python-version: 3.7

      - name: Set up chart-testing
        uses: helm/chart-testing-action@v2.1.0

      - name: Run chart-testing (list-changed)
        id: list-changed
        run: |
          changed=$(ct list-changed)
          if [[ -n "$changed" ]]; then
            echo "::set-output name=changed::true"
          fi

      - name: Run chart-testing (lint)
        run: ct lint --check-version-increment=false

      - name: Create Kind cluster
        uses: helm/kind-action@v1.2.0
        with:
          wait: 120s
        if: steps.list-changed.outputs.changed == 'true'

      - name: Run chart-testing (install)
        run: ct install
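The same checks can be approximated locally before opening a PR; the commands below are a sketch assuming the `ct` and `kind` CLIs are installed, and mirror the workflow's lint and install steps.

```shell
# Lint the chart as the workflow does (no version bump required while iterating).
ct lint --check-version-increment=false --charts charts/external-dns
# Create a throwaway cluster and run the install test against it.
kind create cluster --wait 120s
ct install --charts charts/external-dns
kind delete cluster
```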
.github/workflows/release-chart.yaml (vendored, new file, 34 lines)
@@ -0,0 +1,34 @@
name: Release Chart

on:
  push:
    branches:
      - master
    paths:
      - "charts/external-dns/Chart.yaml"

jobs:
  release:
    if: github.repository == 'kubernetes-sigs/external-dns'
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v2
        with:
          fetch-depth: 0

      - name: Configure Git
        run: |
          git config user.name "$GITHUB_ACTOR"
          git config user.email "$GITHUB_ACTOR@users.noreply.github.com"

      - name: Install Helm
        uses: azure/setup-helm@v1
        with:
          version: v3.6.3

      - name: Run chart-releaser
        uses: helm/chart-releaser-action@v1.2.1
        env:
          CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
          CR_RELEASE_NAME_TEMPLATE: "external-dns-helm-chart-{{ .Version }}"
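Under the hood the chart-releaser action packages the chart, creates a GitHub release, and updates the Helm repository index. A manual approximation with the `cr` CLI, assuming a `GITHUB_TOKEN` with repo access, might be:

```shell
# Package the chart into .cr-release-packages/ (cr's default output path).
cr package charts/external-dns
# Upload a GitHub release (named per CR_RELEASE_NAME_TEMPLATE in the workflow).
cr upload --owner kubernetes-sigs --git-repo external-dns --token "$GITHUB_TOKEN"
# Regenerate and push the repository index.
cr index --owner kubernetes-sigs --git-repo external-dns --push
```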
.github/workflows/trivy.yml (vendored, new file, 19 lines)
@@ -0,0 +1,19 @@
name: trivy vulnerability scanner
on:
  push:
    branches:
      - master
jobs:
  build:
    name: Build
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v2
      - name: Build an image from Dockerfile
        run: |
          make build.docker
      - name: Run trivy
        run: |
          ./scripts/run-trivy.sh
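The scan itself lives in `scripts/run-trivy.sh`, which is not shown in this diff; a minimal hand-run approximation with the trivy CLI, under the assumption that `IMAGE` holds the tag `make build.docker` produced, could be:

```shell
# Build the image exactly as the workflow does.
make build.docker
# Scan it, failing on high/critical findings; IMAGE is an assumption, check the tag make printed.
trivy image --severity HIGH,CRITICAL --exit-code 1 "$IMAGE"
```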
.gitignore (vendored, 3 changes)
@@ -51,3 +51,6 @@ profile.cov

# github codespaces
.venv/
+
+# Helm charts
+!/charts/external-dns/
@@ -27,7 +27,7 @@ COPY . .
RUN make test build.$ARCH

# final image
-FROM $ARCH/alpine:3.13
+FROM $ARCH/alpine:3.14

COPY --from=builder /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt
COPY --from=builder /sigs.k8s.io/external-dns/build/external-dns /bin/external-dns
Makefile (2 changes)
@@ -104,7 +104,7 @@ build.push/multiarch:
	for arch in $(ARCHS); do \
		image="$(IMAGE):$(VERSION)-$${arch}" ;\
		# pre-pull due to https://github.com/kubernetes-sigs/cluster-addons/pull/84/files ;\
-		docker pull $${arch}/alpine:3.13 ;\
+		docker pull $${arch}/alpine:3.14 ;\
		docker pull golang:1.16 ;\
		DOCKER_BUILDKIT=1 docker build --rm --tag $${image} --build-arg VERSION="$(VERSION)" --build-arg ARCH="$${arch}" . ;\
		docker push $${image} ;\
OWNERS (1 change)
@@ -4,6 +4,7 @@
approvers:
  - raffo
  - njuettner
  - seanmalloy

reviewers:
  - njuettner
README.md (13 changes)
@@ -21,9 +21,9 @@ The [FAQ](docs/faq.md) contains additional information and addresses several questions

To see ExternalDNS in action, have a look at this [video](https://www.youtube.com/watch?v=9HQ2XgL9YVI) or read this [blogpost](https://codemine.be/posts/20190125-devops-eks-externaldns/).

-## The Latest Release: v0.8
+## The Latest Release

-ExternalDNS' current release is `v0.8`. This version allows you to keep selected zones (via `--domain-filter`) synchronized with Ingresses and Services of `type=LoadBalancer` in various cloud providers:
+ExternalDNS allows you to keep selected zones (via `--domain-filter`) synchronized with Ingresses and Services of `type=LoadBalancer` in various cloud providers:
* [Google Cloud DNS](https://cloud.google.com/dns/docs/)
* [AWS Route 53](https://aws.amazon.com/route53/)
* [AWS Cloud Map](https://docs.aws.amazon.com/cloud-map/)
@@ -110,6 +110,13 @@ The following table clarifies the current status of the providers according to the
| GoDaddy | Alpha | |
| Gandi | Alpha | @packi |

## Kubernetes version compatibility

| ExternalDNS        |      <= 0.9.x      |     >= 0.10.0      |
| ------------------ | :----------------: | :----------------: |
| Kubernetes <= 1.18 | :white_check_mark: | :x:                |
| Kubernetes >= 1.19 | :x:                | :white_check_mark: |

## Running ExternalDNS:

There are two ways of running ExternalDNS:
@@ -139,7 +146,7 @@ The following tutorials are provided:
* [Dyn](docs/tutorials/dyn.md)
* [Exoscale](docs/tutorials/exoscale.md)
* [ExternalName Services](docs/tutorials/externalname.md)
-* Google Container Engine
+* Google Kubernetes Engine
  * [Using Google's Default Ingress Controller](docs/tutorials/gke.md)
  * [Using the Nginx Ingress Controller](docs/tutorials/nginx-ingress.md)
* [Headless Services](docs/tutorials/hostport.md)
charts/OWNERS (new file, 6 lines)
@@ -0,0 +1,6 @@
labels:
  - chart
approvers:
  - stevehipwell
reviewers:
  - stevehipwell
charts/external-dns/.helmignore (new file, 23 lines)
@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
charts/external-dns/Chart.yaml (new file, 21 lines)
@@ -0,0 +1,21 @@
apiVersion: v2
name: external-dns
description: ExternalDNS synchronizes exposed Kubernetes Services and Ingresses with DNS providers.
type: application
version: 1.6.0
appVersion: 0.10.1
keywords:
  - kubernetes
  - external-dns
  - dns
home: https://github.com/kubernetes-sigs/external-dns/
icon: https://github.com/kubernetes-sigs/external-dns/raw/master/img/external-dns.png
sources:
  - https://github.com/kubernetes-sigs/external-dns/
maintainers:
  - name: stevehipwell
    email: steve.hipwell@gmail.com
annotations:
  artifacthub.io/changes: |
    - kind: changed
      description: "Allow specifying Service annotations."
charts/external-dns/README.md (new file, 67 lines)
@@ -0,0 +1,67 @@
# ExternalDNS

[ExternalDNS](https://github.com/kubernetes-sigs/external-dns/) synchronizes exposed Kubernetes Services and Ingresses with DNS providers.

## Installing the Chart

Before you can install the chart you will need to add the `external-dns` repo to [Helm](https://helm.sh/).

```shell
helm repo add external-dns https://kubernetes-sigs.github.io/external-dns/
```

After you've added the repo you can install the chart.

```shell
helm upgrade --install external-dns external-dns/external-dns
```

## Configuration

The following table lists the configurable parameters of the _ExternalDNS_ chart and their default values.

| Parameter | Description | Default |
| --- | --- | --- |
| `image.repository` | Image repository. | `k8s.gcr.io/external-dns/external-dns` |
| `image.tag` | Image tag, will override the default tag derived from the chart app version. | `""` |
| `image.pullPolicy` | Image pull policy. | `IfNotPresent` |
| `imagePullSecrets` | Image pull secrets. | `[]` |
| `nameOverride` | Override the `name` of the chart. | `""` |
| `fullnameOverride` | Override the `fullname` of the chart. | `""` |
| `serviceAccount.create` | If `true`, create a new `serviceaccount`. | `true` |
| `serviceAccount.annotations` | Annotations to add to the service account. | `{}` |
| `serviceAccount.name` | Service account to be used. If not set and `serviceAccount.create` is `true`, a name is generated using the full name template. | `""` |
| `rbac.create` | If `true`, create the RBAC resources. | `true` |
| `podLabels` | Labels to add to the pod. | `{}` |
| `podAnnotations` | Annotations to add to the pod. | `{}` |
| `podSecurityContext` | Security context for the pod; this supports the full [PodSecurityContext](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#podsecuritycontext-v1-core) API. | _see values.yaml_ |
| `securityContext` | Security context for the _external-dns_ container; this supports the full [SecurityContext](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#securitycontext-v1-core) API. | _see values.yaml_ |
| `priorityClassName` | Priority class name to use for the pod. | `""` |
| `terminationGracePeriodSeconds` | Termination grace period for the pod. | `null` |
| `serviceMonitor.enabled` | If `true`, create a _Prometheus_ service monitor. | `false` |
| `serviceMonitor.additionalLabels` | Additional labels to be set on the ServiceMonitor. | `{}` |
| `serviceMonitor.interval` | _Prometheus_ scrape frequency. | `1m` |
| `serviceMonitor.scrapeTimeout` | _Prometheus_ scrape timeout. | `10s` |
| `env` | [Environment variables](https://kubernetes.io/docs/tasks/inject-data-application/define-environment-variable-container/) for the _external-dns_ container; this supports the full [EnvVar](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#envvar-v1-core) API including secrets and configmaps. | `[]` |
| `livenessProbe` | [Liveness probe](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/) for the _external-dns_ container; this supports the full [Probe](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#probe-v1-core) API. | _see values.yaml_ |
| `readinessProbe` | [Readiness probe](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/) for the _external-dns_ container; this supports the full [Probe](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#probe-v1-core) API. | _see values.yaml_ |
| `service.annotations` | Annotations to add to the service. | `{}` |
| `service.port` | Port to expose via the service. | `7979` |
| `extraVolumes` | Additional volumes for the pod; this supports the full [VolumeDevice](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#volumedevice-v1-core) API. | `[]` |
| `extraVolumeMounts` | Additional volume mounts for the _external-dns_ container; this supports the full [VolumeMount](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#volumemount-v1-core) API. | `[]` |
| `resources` | Resource requests and limits for the _external-dns_ container; this supports the full [ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#resourcerequirements-v1-core) API. | `{}` |
| `nodeSelector` | Node labels for pod assignment. | `{}` |
| `tolerations` | Tolerations for pod assignment; this supports the full [Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#toleration-v1-core) API. | `[]` |
| `affinity` | Affinity settings for pod assignment; this supports the full [Affinity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#affinity-v1-core) API. | `{}` |
| `logLevel` | Verbosity of the logs, available values are: `panic`, `debug`, `info`, `warn`, `error`, `fatal`. | `info` |
| `logFormat` | Format of the logs, available values are: `text`, `json`. | `text` |
| `interval` | The interval for DNS updates. | `1m` |
| `triggerLoopOnEvent` | When enabled, triggers the run loop on create/update/delete events in addition to the regular interval. | `false` |
| `sources` | K8s resource types to be observed for new DNS entries. | _see values.yaml_ |
| `policy` | How DNS records are synchronized between sources and providers, available values are: `sync`, `upsert-only`. | `upsert-only` |
| `registry` | Registry type, available types are: `txt`, `noop`. | `txt` |
| `txtOwnerId` | TXT registry identifier. | `""` |
| `txtPrefix` | Prefix to create a TXT record with a name following the pattern `prefix.<CNAME record>`. | `""` |
| `domainFilters` | Limit possible target zones by domain suffixes. | `[]` |
| `provider` | DNS provider where the DNS records will be created, for the available providers and how to configure them see the [README](https://github.com/kubernetes-sigs/external-dns#deploying-to-a-cluster). | `aws` |
| `extraArgs` | Extra arguments to pass to the _external-dns_ container, these are needed for provider-specific arguments. | `[]` |
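As a usage sketch of the parameters above (the domain, owner id, and namespace are placeholders, not chart defaults):

```shell
helm upgrade --install external-dns external-dns/external-dns \
  --namespace external-dns --create-namespace \
  --set provider=aws \
  --set txtOwnerId=my-cluster \
  --set 'domainFilters[0]=example.com'
```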
charts/external-dns/templates/NOTES.txt (new file, 7 lines)
@@ -0,0 +1,7 @@
***********************************************************************
*                            External DNS                             *
***********************************************************************
Chart version: {{ .Chart.Version }}
App version:   {{ .Chart.AppVersion }}
Image tag:     {{ include "external-dns.image" . }}
***********************************************************************
charts/external-dns/templates/_helpers.tpl (new file, 69 lines)
@@ -0,0 +1,69 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "external-dns.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "external-dns.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}

{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "external-dns.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Common labels
*/}}
{{- define "external-dns.labels" -}}
helm.sh/chart: {{ include "external-dns.chart" . }}
{{ include "external-dns.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}

{{/*
Selector labels
*/}}
{{- define "external-dns.selectorLabels" -}}
app.kubernetes.io/name: {{ include "external-dns.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}

{{/*
Create the name of the service account to use
*/}}
{{- define "external-dns.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "external-dns.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}

{{/*
The image to use
*/}}
{{- define "external-dns.image" -}}
{{- printf "%s:%s" .Values.image.repository (default (printf "v%s" .Chart.AppVersion) .Values.image.tag) }}
{{- end }}
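To see how these helpers resolve without installing anything, `helm template` can render the chart locally; `my-release` and the tag are arbitrary examples, and per the `external-dns.image` helper an empty `image.tag` falls back to `v<appVersion>` (`v0.10.1` here).

```shell
# Render the chart and inspect the computed fullname and image reference.
helm template my-release external-dns/external-dns --set image.tag=v0.10.2 \
  | grep -E 'name: my-release-external-dns|image:'
```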
charts/external-dns/templates/clusterrole.yaml (new file, 44 lines)
@@ -0,0 +1,44 @@
{{- if .Values.rbac.create -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: {{ template "external-dns.fullname" . }}
  labels:
    {{- include "external-dns.labels" . | nindent 4 }}
rules:
  {{- if or (has "node" .Values.sources) (has "pod" .Values.sources) (has "service" .Values.sources) }}
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["list","watch"]
  {{- end }}

  {{- if or (has "pod" .Values.sources) (has "service" .Values.sources) }}
  - apiGroups: [""]
    resources: ["pods"]
    verbs: ["get","watch","list"]
  {{- end }}

  {{- if has "service" .Values.sources }}
  - apiGroups: [""]
    resources: ["services","endpoints"]
    verbs: ["get","watch","list"]
  {{- end }}

  {{- if has "ingress" .Values.sources }}
  - apiGroups: ["extensions","networking.k8s.io"]
    resources: ["ingresses"]
    verbs: ["get","watch","list"]
  {{- end }}

  {{- if has "istio-gateway" .Values.sources }}
  - apiGroups: ["networking.istio.io"]
    resources: ["gateways"]
    verbs: ["get","watch","list"]
  {{- end }}

  {{- if has "istio-virtualservice" .Values.sources }}
  - apiGroups: ["networking.istio.io"]
    resources: ["virtualservices"]
    verbs: ["get","watch","list"]
  {{- end }}
{{- end }}
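Once installed, the granted permissions can be spot-checked from the API server's point of view; the namespace and service account name below assume a release named `external-dns` installed in the `external-dns` namespace.

```shell
# Should print "yes" when the "ingress" source is enabled in .Values.sources.
kubectl auth can-i list ingresses.networking.k8s.io --all-namespaces \
  --as=system:serviceaccount:external-dns:external-dns
```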
charts/external-dns/templates/clusterrolebinding.yaml (new file, 16 lines)
@@ -0,0 +1,16 @@
{{- if .Values.rbac.create -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: {{ printf "%s-viewer" (include "external-dns.fullname" .) }}
  labels:
    {{- include "external-dns.labels" . | nindent 4 }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: {{ template "external-dns.fullname" . }}
subjects:
  - kind: ServiceAccount
    name: {{ template "external-dns.serviceAccountName" . }}
    namespace: {{ .Release.Namespace }}
{{- end }}
charts/external-dns/templates/deployment.yaml (new file, 109 lines)
@@ -0,0 +1,109 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "external-dns.fullname" . }}
  labels:
    {{- include "external-dns.labels" . | nindent 4 }}
spec:
  replicas: 1
  selector:
    matchLabels:
      {{- include "external-dns.selectorLabels" . | nindent 6 }}
  template:
    metadata:
      labels:
        {{- include "external-dns.selectorLabels" . | nindent 8 }}
        {{- with .Values.podLabels }}
        {{- toYaml . | nindent 8 }}
        {{- end }}
      {{- with .Values.podAnnotations }}
      annotations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
    spec:
      {{- with .Values.imagePullSecrets }}
      imagePullSecrets:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      serviceAccountName: {{ include "external-dns.serviceAccountName" . }}
      {{- with .Values.podSecurityContext }}
      securityContext:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.priorityClassName }}
      priorityClassName: {{ . | quote }}
      {{- end }}
      {{- with .Values.terminationGracePeriodSeconds }}
      terminationGracePeriodSeconds: {{ . }}
      {{- end }}
      containers:
        - name: external-dns
          {{- with .Values.securityContext }}
          securityContext:
            {{- toYaml . | nindent 12 }}
          {{- end }}
          image: {{ include "external-dns.image" . }}
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          {{- with .Values.env }}
          env:
            {{- toYaml . | nindent 12 }}
          {{- end }}
          args:
            - --log-level={{ .Values.logLevel }}
            - --log-format={{ .Values.logFormat }}
            - --interval={{ .Values.interval }}
            {{- if .Values.triggerLoopOnEvent }}
            - --events
            {{- end }}
            {{- range .Values.sources }}
            - --source={{ . }}
            {{- end }}
            - --policy={{ .Values.policy }}
            - --registry={{ .Values.registry }}
            {{- if eq .Values.registry "txt" }}
            {{- if .Values.txtOwnerId }}
            - --txt-owner-id={{ .Values.txtOwnerId }}
            {{- end }}
            {{- if .Values.txtPrefix }}
            - --txt-prefix={{ .Values.txtPrefix }}
            {{- end }}
            {{- end }}
            {{- range .Values.domainFilters }}
            - --domain-filter={{ . }}
            {{- end }}
            - --provider={{ .Values.provider }}
            {{- range .Values.extraArgs }}
            - {{ . }}
            {{- end }}
          ports:
            - name: http
              protocol: TCP
              containerPort: 7979
          livenessProbe:
            {{- toYaml .Values.livenessProbe | nindent 12 }}
          readinessProbe:
            {{- toYaml .Values.readinessProbe | nindent 12 }}
          {{- with .Values.extraVolumeMounts }}
          volumeMounts:
            {{- toYaml . | nindent 12 }}
          {{- end }}
          {{- with .Values.resources }}
          resources:
            {{- toYaml . | nindent 12 }}
          {{- end }}
      {{- with .Values.extraVolumes }}
      volumes:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.affinity }}
      affinity:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
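Because the container's flags are assembled entirely from values, rendering just this template is a quick way to confirm what arguments the process will actually receive; the overrides in this sketch are illustrative.

```shell
# Show only the rendered Deployment for inspection of the args list.
helm template external-dns external-dns/external-dns \
  --set policy=sync --set 'sources[0]=ingress' \
  --show-only templates/deployment.yaml
```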
charts/external-dns/templates/service.yaml (new file, 19 lines)
@@ -0,0 +1,19 @@
apiVersion: v1
kind: Service
metadata:
  name: {{ include "external-dns.fullname" . }}
  labels:
    {{- include "external-dns.labels" . | nindent 4 }}
  {{- with .Values.service.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
spec:
  type: ClusterIP
  selector:
    {{- include "external-dns.selectorLabels" . | nindent 4 }}
  ports:
    - name: http
      port: {{ .Values.service.port }}
      targetPort: http
      protocol: TCP
charts/external-dns/templates/serviceaccount.yaml (new file, 12 lines)
@@ -0,0 +1,12 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ include "external-dns.serviceAccountName" . }}
  labels:
    {{- include "external-dns.labels" . | nindent 4 }}
  {{- with .Values.serviceAccount.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
{{- end }}
charts/external-dns/templates/servicemonitor.yaml (new file, 28 lines)
@@ -0,0 +1,28 @@
{{- if .Values.serviceMonitor.enabled -}}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: {{ include "external-dns.fullname" . }}
  labels:
    {{- include "external-dns.labels" . | nindent 4 }}
    {{- with .Values.serviceMonitor.additionalLabels }}
    {{- toYaml . | nindent 4 }}
    {{- end }}
spec:
  jobLabel: {{ .Release.Name }}
  namespaceSelector:
    matchNames:
      - {{ .Release.Namespace }}
  selector:
    matchLabels:
      {{- include "external-dns.selectorLabels" . | nindent 6 }}
  endpoints:
    - port: http
      path: /metrics
      {{- with .Values.serviceMonitor.interval }}
      interval: {{ . }}
      {{- end }}
      {{- with .Values.serviceMonitor.scrapeTimeout }}
      scrapeTimeout: {{ . }}
      {{- end }}
{{- end }}
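A hedged example of turning the monitor on; the `release: prometheus` label is an assumption about a typical Prometheus Operator selector, not something this chart sets for you.

```shell
helm upgrade --install external-dns external-dns/external-dns \
  --set serviceMonitor.enabled=true \
  --set serviceMonitor.additionalLabels.release=prometheus
# Confirm the ServiceMonitor exists and carries the extra label.
kubectl get servicemonitor external-dns -o yaml | grep -A3 'labels:'
```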
charts/external-dns/values.yaml (new file, 110 lines)
@@ -0,0 +1,110 @@
# Default values for external-dns.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

image:
  repository: k8s.gcr.io/external-dns/external-dns
  # Overrides the image tag whose default is v{{ .Chart.AppVersion }}
  tag: ""
  pullPolicy: IfNotPresent

imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""

serviceAccount:
  # Specifies whether a service account should be created
  create: true
  # Annotations to add to the service account
  annotations: {}
  # The name of the service account to use.
  # If not set and create is true, a name is generated using the fullname template
  name: ""

rbac:
  # Specifies whether RBAC resources should be created
  create: true

podLabels: {}

podAnnotations: {}

podSecurityContext:
  fsGroup: 65534

securityContext:
  runAsNonRoot: true
  runAsUser: 65534
  readOnlyRootFilesystem: true
  capabilities:
    drop: ["ALL"]

priorityClassName: ""

terminationGracePeriodSeconds:

serviceMonitor:
  enabled: false
  additionalLabels: {}
  interval: 1m
  scrapeTimeout: 10s

env: []

livenessProbe:
  httpGet:
    path: /healthz
    port: http
  initialDelaySeconds: 10
  periodSeconds: 10
  timeoutSeconds: 5
  failureThreshold: 2
  successThreshold: 1

readinessProbe:
  httpGet:
    path: /healthz
    port: http
  initialDelaySeconds: 5
  periodSeconds: 10
  timeoutSeconds: 5
  failureThreshold: 6
  successThreshold: 1

service:
  port: 7979
  annotations: {}

extraVolumes: []

extraVolumeMounts: []

resources: {}

nodeSelector: {}

tolerations: []

affinity: {}

logLevel: info
logFormat: text

interval: 1m
triggerLoopOnEvent: false

sources:
  - service
  - ingress

policy: upsert-only

registry: txt
txtOwnerId: ""
txtPrefix: ""

domainFilters: []

provider: aws

extraArgs: []
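For anything beyond a couple of `--set` flags, a values file keeps overrides reviewable; everything in this sketch (project name, domain) is a placeholder.

```shell
cat > my-values.yaml <<'EOF'
provider: google
extraArgs:
  - --google-project=my-dns-project
domainFilters:
  - example.com
logFormat: json
EOF
helm upgrade --install external-dns external-dns/external-dns -f my-values.yaml
```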
@@ -3,7 +3,7 @@ timeout: 5000s
options:
  substitution_option: ALLOW_LOOSE
steps:
-  - name: "gcr.io/k8s-testimages/gcb-docker-gcloud:v20200824-5d057db"
+  - name: "gcr.io/k8s-staging-test-infra/gcb-docker-gcloud:v20211118-2f2d816b90"
    entrypoint: make
    env:
      - DOCKER_CLI_EXPERIMENTAL=enabled
@@ -94,6 +94,14 @@ var (
			Help: "Number of Source errors.",
		},
	)
	verifiedARecords = prometheus.NewGauge(
		prometheus.GaugeOpts{
			Namespace: "external_dns",
			Subsystem: "controller",
			Name:      "verified_a_records",
			Help:      "Number of DNS A-records that exists both in source and registry.",
		},
	)
)

func init() {
@@ -105,6 +113,7 @@ func init() {
	prometheus.MustRegister(deprecatedRegistryErrors)
	prometheus.MustRegister(deprecatedSourceErrors)
	prometheus.MustRegister(controllerNoChangesTotal)
	prometheus.MustRegister(verifiedARecords)
}

// Controller is responsible for orchestrating the different components.
@@ -151,7 +160,8 @@ func (c *Controller) RunOnce(ctx context.Context) error {
		return err
	}
	sourceEndpointsTotal.Set(float64(len(endpoints)))

	vRecords := fetchMatchingARecords(endpoints, records)
	verifiedARecords.Set(float64(len(vRecords)))
	endpoints = c.Registry.AdjustEndpoints(endpoints)

	plan := &plan.Plan{
@@ -160,7 +170,7 @@ func (c *Controller) RunOnce(ctx context.Context) error {
		Desired:      endpoints,
		DomainFilter: endpoint.MatchAllDomainFilters{c.DomainFilter, c.Registry.GetDomainFilter()},
		PropertyComparator: c.Registry.PropertyValuesEqual,
-		ManagedRecords: []string{endpoint.RecordTypeA, endpoint.RecordTypeCNAME},
+		ManagedRecords: c.ManagedRecordTypes,
	}

	plan = plan.Calculate()
@@ -181,6 +191,32 @@ func (c *Controller) RunOnce(ctx context.Context) error {
	return nil
}

// fetchMatchingARecords checks and returns the intersection of A records in the endpoints and the registry.
func fetchMatchingARecords(endpoints []*endpoint.Endpoint, registryRecords []*endpoint.Endpoint) []string {
	aRecords := filterARecords(endpoints)
	recordsMap := make(map[string]struct{})
	for _, regRecord := range registryRecords {
		recordsMap[regRecord.DNSName] = struct{}{}
	}
	var cm []string
	for _, sourceRecord := range aRecords {
		if _, found := recordsMap[sourceRecord]; found {
			cm = append(cm, sourceRecord)
		}
	}
	return cm
}

func filterARecords(endpoints []*endpoint.Endpoint) []string {
	var aRecords []string
	for _, endPoint := range endpoints {
		if endPoint.RecordType == endpoint.RecordTypeA {
			aRecords = append(aRecords, endPoint.DNSName)
		}
	}
	return aRecords
}

// ScheduleRunOnce makes sure execution happens at most once per interval.
func (c *Controller) ScheduleRunOnce(now time.Time) {
	c.nextRunAtMux.Lock()
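The new gauge is exported on the controller's metrics endpoint (port 7979, per the chart above); one way to watch it, assuming a deployment named `external-dns` in the current namespace:

```shell
kubectl port-forward deploy/external-dns 7979:7979 &
curl -s http://localhost:7979/metrics | grep external_dns_controller_verified_records
```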
@@ -19,12 +19,15 @@ package controller
import (
	"context"
	"errors"
	"github.com/prometheus/client_golang/prometheus"
	"math"
	"reflect"
	"testing"
	"time"

	"sigs.k8s.io/external-dns/endpoint"
	"sigs.k8s.io/external-dns/internal/testutils"
	"sigs.k8s.io/external-dns/pkg/apis/externaldns"
	"sigs.k8s.io/external-dns/plan"
	"sigs.k8s.io/external-dns/provider"
	"sigs.k8s.io/external-dns/registry"
@@ -48,6 +51,10 @@ type filteredMockProvider struct {
	ApplyChangesCalls []*plan.Changes
}

type errorMockProvider struct {
	mockProvider
}

func (p *filteredMockProvider) GetDomainFilter() endpoint.DomainFilterInterface {
	return p.domainFilter
}
@@ -69,6 +76,10 @@ func (p *mockProvider) Records(ctx context.Context) ([]*endpoint.Endpoint, error) {
	return p.RecordsStore, nil
}

func (p *errorMockProvider) Records(ctx context.Context) ([]*endpoint.Endpoint, error) {
	return nil, errors.New("error for testing")
}

// ApplyChanges validates that the passed in changes satisfy the assumptions.
func (p *mockProvider) ApplyChanges(ctx context.Context, changes *plan.Changes) error {
	if len(changes.Create) != len(p.ExpectChanges.Create) {
@@ -119,6 +130,8 @@ func newMockProvider(endpoints []*endpoint.Endpoint, changes *plan.Changes) provider.Provider {
func TestRunOnce(t *testing.T) {
	// Fake some desired endpoints coming from our source.
	source := new(testutils.MockSource)
	cfg := externaldns.NewConfig()
	cfg.ManagedDNSRecordTypes = []string{endpoint.RecordTypeA, endpoint.RecordTypeCNAME}
	source.On("Endpoints").Return([]*endpoint.Endpoint{
		{
			DNSName: "create-record",
@@ -170,12 +183,20 @@ func TestRunOnce(t *testing.T) {
		Source:             source,
		Registry:           r,
		Policy:             &plan.SyncPolicy{},
		ManagedRecordTypes: cfg.ManagedDNSRecordTypes,
	}

	assert.NoError(t, ctrl.RunOnce(context.Background()))

	// Validate that the mock source was called.
	source.AssertExpectations(t)
	// check the verified records
	assert.Equal(t, math.Float64bits(1), valueFromMetric(verifiedARecords))
}

func valueFromMetric(metric prometheus.Gauge) uint64 {
	ref := reflect.ValueOf(metric)
	return reflect.Indirect(ref).FieldByName("valBits").Uint()
}

func TestShouldRunOnce(t *testing.T) {
@@ -219,6 +240,9 @@ func TestShouldRunOnce(t *testing.T) {

func testControllerFiltersDomains(t *testing.T, configuredEndpoints []*endpoint.Endpoint, domainFilter endpoint.DomainFilterInterface, providerEndpoints []*endpoint.Endpoint, expectedChanges []*plan.Changes) {
	t.Helper()
	cfg := externaldns.NewConfig()
	cfg.ManagedDNSRecordTypes = []string{endpoint.RecordTypeA, endpoint.RecordTypeCNAME}

	source := new(testutils.MockSource)
	source.On("Endpoints").Return(configuredEndpoints, nil)

@@ -235,6 +259,7 @@ func testControllerFiltersDomains(t *testing.T, configuredEndpoints []*endpoint.Endpoint, domainFilter endpoint.DomainFilterInterface, providerEndpoints []*endpoint.Endpoint, expectedChanges []*plan.Changes) {
		Registry:           r,
		Policy:             &plan.SyncPolicy{},
		DomainFilter:       domainFilter,
		ManagedRecordTypes: cfg.ManagedDNSRecordTypes,
	}

	assert.NoError(t, ctrl.RunOnce(context.Background()))
@@ -368,3 +393,80 @@ func TestWhenMultipleControllerConsidersAllFilteredComain(t *testing.T) {
		},
	)
}

func TestVerifyARecords(t *testing.T) {
	testControllerFiltersDomains(
		t,
		[]*endpoint.Endpoint{
			{
				DNSName:    "create-record.used.tld",
				RecordType: endpoint.RecordTypeA,
				Targets:    endpoint.Targets{"1.2.3.4"},
			},
			{
				DNSName:    "some-record.used.tld",
				RecordType: endpoint.RecordTypeA,
				Targets:    endpoint.Targets{"8.8.8.8"},
			},
		},
		endpoint.NewDomainFilter([]string{"used.tld"}),
		[]*endpoint.Endpoint{
			{
				DNSName:    "some-record.used.tld",
				RecordType: endpoint.RecordTypeA,
				Targets:    endpoint.Targets{"8.8.8.8"},
			},
			{
				DNSName:    "create-record.used.tld",
				RecordType: endpoint.RecordTypeA,
				Targets:    endpoint.Targets{"1.2.3.4"},
			},
		},
		[]*plan.Changes{},
	)
	assert.Equal(t, math.Float64bits(2), valueFromMetric(verifiedARecords))

	testControllerFiltersDomains(
		t,
		[]*endpoint.Endpoint{
			{
				DNSName:    "some-record.1.used.tld",
				RecordType: endpoint.RecordTypeA,
				Targets:    endpoint.Targets{"1.2.3.4"},
			},
			{
				DNSName:    "some-record.2.used.tld",
				RecordType: endpoint.RecordTypeA,
				Targets:    endpoint.Targets{"8.8.8.8"},
			},
			{
				DNSName:    "some-record.3.used.tld",
				RecordType: endpoint.RecordTypeA,
				Targets:    endpoint.Targets{"24.24.24.24"},
			},
		},
		endpoint.NewDomainFilter([]string{"used.tld"}),
		[]*endpoint.Endpoint{
			{
				DNSName:    "some-record.1.used.tld",
				RecordType: endpoint.RecordTypeA,
				Targets:    endpoint.Targets{"1.2.3.4"},
			},
			{
				DNSName:    "some-record.2.used.tld",
				RecordType: endpoint.RecordTypeA,
				Targets:    endpoint.Targets{"8.8.8.8"},
			},
		},
		[]*plan.Changes{{
			Create: []*endpoint.Endpoint{
				{
					DNSName:    "some-record.3.used.tld",
					RecordType: endpoint.RecordTypeA,
					Targets:    endpoint.Targets{"24.24.24.24"},
				},
			},
		}},
	)
	assert.Equal(t, math.Float64bits(2), valueFromMetric(verifiedARecords))
}
docs/contributing/chart.md (new file, 5 lines)
@@ -0,0 +1,5 @@
# Helm Chart

## Chart Changes

When contributing chart changes, please follow the same process as when contributing other content. Please also **DON'T** modify _Chart.yaml_ in the PR: that would trigger a chart release when merged, and your PR would need modifying before it could be accepted. The chart version will be updated as part of the PR that releases the chart.
docs/faq.md (13 changes)
@@ -2,7 +2,7 @@

### How is ExternalDNS useful to me?

-You've probably created many deployments. Typically, you expose your deployment to the Internet by creating a Service with `type=LoadBalancer`. Depending on your environment, this usually assigns a random publicly available endpoint to your service that you can access from anywhere in the world. On Google Container Engine, this is a public IP address:
+You've probably created many deployments. Typically, you expose your deployment to the Internet by creating a Service with `type=LoadBalancer`. Depending on your environment, this usually assigns a random publicly available endpoint to your service that you can access from anywhere in the world. On Google Kubernetes Engine, this is a public IP address:

```console
$ kubectl get svc
@@ -54,7 +54,7 @@ Yes, you can. Pass in a comma separated list to `--fqdn-template`. Be aware this

### Which Service and Ingress controllers are supported?

-Regarding Services, we'll support the OSI Layer 4 load balancers that Kubernetes creates on AWS and Google Container Engine, and possibly other clusters running on Google Compute Engine.
+Regarding Services, we'll support the OSI Layer 4 load balancers that Kubernetes creates on AWS and Google Kubernetes Engine, and possibly other clusters running on Google Compute Engine.

Regarding Ingress, we'll support:
* Google's Ingress Controller on GKE that integrates with their Layer 7 load balancers (GLBC)
@@ -185,6 +185,8 @@ Here is the full list of available metrics provided by ExternalDNS:
| external_dns_registry_errors_total | Number of Registry errors | Counter |
| external_dns_source_endpoints_total | Number of Endpoints in the registry | Gauge |
| external_dns_source_errors_total | Number of Source errors | Counter |
+| external_dns_controller_verified_records | Number of DNS A-records that exist in both source and registry | Gauge |

### How can I run ExternalDNS under a specific GCP Service Account, e.g. to access DNS records in other projects?

@@ -255,7 +257,7 @@ The internal one should provision hostnames used on the internal network (perhaps
one to expose DNS to the internet.

To do this with ExternalDNS you can use the `--annotation-filter` to specifically tie an instance of ExternalDNS to
-an instance of a ingress controller. Let's assume you have two ingress controllers `nginx-internal` and `nginx-external`
+an instance of an ingress controller. Let's assume you have two ingress controllers `nginx-internal` and `nginx-external`
then you can start two ExternalDNS providers one with `--annotation-filter=kubernetes.io/ingress.class in (nginx-internal)`
and one with `--annotation-filter=kubernetes.io/ingress.class in (nginx-external)`.

@@ -265,6 +267,11 @@ If you need to search for multiple values of said annotation, you can provide a
Beware when using multiple sources, e.g. `--source=service --source=ingress`: `--annotation-filter` will filter every given source object.
If you need to filter only one specific source, you have to run a separate external-dns service containing only the wanted `--source` and `--annotation-filter`.

+**Note:** Filtering based on annotation means that the external-dns controller will receive all resources of that kind and then filter on the client-side.
+In larger clusters with many resources which change frequently this can cause performance issues. If only some resources need to be managed by an instance
+of external-dns then label filtering can be used instead of annotation filtering. This means that only those resources which match the selector specified
+in `--label-filter` will be passed to the controller.

### How do I specify that I want the DNS record to point to either the Node's public or private IP when it has both?

If your Nodes have both public and private IP addresses, you might want to write DNS records with one or the other.
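Concretely, the split described above could be run as two controllers that differ only in their annotation filter and owner id; the flag values in this sketch are illustrative.

```shell
# Instance tied to the internal ingress controller.
external-dns --source=ingress --provider=aws --registry=txt --txt-owner-id=internal \
  --annotation-filter='kubernetes.io/ingress.class in (nginx-internal)'
# Instance tied to the external ingress controller.
external-dns --source=ingress --provider=aws --registry=txt --txt-owner-id=external \
  --annotation-filter='kubernetes.io/ingress.class in (nginx-external)'
```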
@@ -24,10 +24,21 @@ You must be an official maintainer of the project to be able to do a release.

### Steps

-- Run `scripts/releaser.sh` to create a new GitHub release.
+- Run `scripts/releaser.sh` to create a new GitHub release. Alternatively, you can create a release in the GitHub UI, making sure to use the auto-generated release notes feature.
- The step above will trigger the Kubernetes based CI/CD system [Prow](https://prow.k8s.io/?repo=kubernetes-sigs%2Fexternal-dns). Verify that a new image was built and uploaded to `gcr.io/k8s-staging-external-dns/external-dns`.
- Create a PR in the [k8s.io repo](https://github.com/kubernetes/k8s.io) (see https://github.com/kubernetes/k8s.io/pull/540 for reference) by taking the current staging image using the sha256 digest. Once the PR is merged, the image will be live with the corresponding tag specified in the PR.
- Verify that the image is pullable with the given tag (i.e. `v0.7.5`).
- Branch out from the default branch and run `scripts/kustomize-version-udapter.sh` to update the image tag used in the kustomization.yaml.
+- Create an issue to release the corresponding Helm chart via the chart release process (below), assigned to a chart maintainer.
- Create a PR with the kustomize change.
- Once the PR is merged, all is done :-)

## How to release a new chart version

The chart needs to be released in response to an ExternalDNS image release or on an as-needed basis; this should be triggered by an issue to release the chart.

### Steps

- Create a PR to update _Chart.yaml_ with the ExternalDNS version in `appVersion`, the agreed-on chart release version in `version`, and `annotations` showing the changes
- Validate that the chart linting is successful
- Merge the PR to trigger a GitHub action to release the chart
@@ -464,6 +464,58 @@ $ aws route53 delete-hosted-zone --id /hostedzone/ZEWFWZ4R16P7IB
## Throttling

Route53 has a [5 API requests per second per account hard quota](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/DNSLimitations.html#limits-api-requests-route-53).
-Running several fast polling ExternalDNS instances in a given account can easily hit that limit. Some ways to circumvent that issue includes:
-* Augment the synchronization interval (`--interval`), at the cost of slower changes propagation.
-* If the ExternalDNS managed zones list doesn't change frequently, set `--aws-zones-cache-duration` (zones list cache time-to-live) to a larger value. Note that zones list cache can be disabled with `--aws-zones-cache-duration=0s`.
+Running several fast polling ExternalDNS instances in a given account can easily hit that limit. Some ways to reduce the request rate include:
+* Reduce the polling loop's synchronization interval at the possible cost of slower change propagation (but see `--events` below to reduce the impact).
+  * `--interval=5m` (default `1m`)
+* Trigger the polling loop on changes to K8s objects, rather than only at `interval`, to have responsive updates with long poll intervals
+  * `--events`
+* Limit the [sources watched](https://github.com/kubernetes-sigs/external-dns/blob/master/pkg/apis/externaldns/types.go#L364) when the `--events` flag is specified to specific types, namespaces, labels, or annotations
+  * `--source=ingress --source=service` - specify multiple times for multiple sources
+  * `--namespace=my-app`
+  * `--label-filter=app in (my-app)`
+  * `--annotation-filter=kubernetes.io/ingress.class in (nginx-external)` - note that this filter would apply to services too.
+* Limit services watched by type (not applicable to ingress or other types)
+  * `--service-type-filter=LoadBalancer` (default `all`)
+* Limit the hosted zones considered
+  * `--zone-id-filter=ABCDEF12345678` - specify multiple times if needed
+  * `--domain-filter=example.com` by domain suffix - specify multiple times if needed
+  * `--regex-domain-filter=example*` by domain suffix but as a regex - overrides domain-filter
+  * `--exclude-domains=ignore.this.example.com` to exclude a domain or subdomain
+  * `--regex-domain-exclusion=ignore*` subtracts its matches from `regex-domain-filter`'s matches
+  * `--aws-zone-type=public` only sync zones of this type `[public|private]`
+  * `--aws-zone-tags=owner=k8s` only sync zones with this tag
+* If the list of zones managed by ExternalDNS doesn't change frequently, cache it by setting a TTL.
+  * `--aws-zones-cache-duration=3h` (default `0` - disabled)
+* Increase the number of changes applied to Route53 in each batch
+  * `--aws-batch-change-size=4000` (default `1000`)
+* Increase the interval between changes
+  * `--aws-batch-change-interval=10s` (default `1s`)
+* Introduce some jitter into pod initialization, so that when multiple instances of ExternalDNS are updated at the same time they do not make their requests on the same second.

A simple way to implement randomised startup is with an init container:

```yaml
...
spec:
  initContainers:
  - name: init-jitter
    image: k8s.gcr.io/external-dns/external-dns:v0.7.6
    command:
    - /bin/sh
    - -c
    - 'FOR=$((RANDOM % 10))s;echo "Sleeping for $FOR";sleep $FOR'
  containers:
...
```

### EKS

An effective starting point for EKS with an ingress controller might look like:

```bash
--interval=5m
--events
--source=ingress
--domain-filter=example.com
--aws-zones-cache-duration=1h
```
@@ -1,4 +1,4 @@
-# Setting up ExternalDNS on Google Container Engine
+# Setting up ExternalDNS on Google Kubernetes Engine

This tutorial describes how to set up ExternalDNS for usage within a GKE cluster. Make sure to use **>=0.4** version of ExternalDNS for this tutorial

@@ -123,6 +123,7 @@ spec:
        - --domain-filter=external-dns-test.gcp.zalan.do # will make ExternalDNS see only the hosted zones matching provided domain, omit to process all available hosted zones
        - --provider=google
       # - --google-project=zalando-external-dns-test # Use this to specify a project different from the one external-dns is running inside
+        - --google-zone-visibility=private # Use this to filter to only zones with this visibility. Set to either 'public' or 'private'. Omitting will match public and private zones
        - --policy=upsert-only # would prevent ExternalDNS from deleting any records, omit to enable full synchronization
        - --registry=txt
        - --txt-owner-id=my-identifier

@@ -78,6 +78,7 @@ spec:
        - --infoblox-wapi-port=443 # (optional) Infoblox WAPI port. The default is "443".
        - --infoblox-wapi-version=2.3.1 # (optional) Infoblox WAPI version. The default is "2.3.1"
        - --infoblox-ssl-verify # (optional) Use --no-infoblox-ssl-verify to skip server certificate verification.
+        - --infoblox-create-ptr # (optional) Use --infoblox-create-ptr to create a PTR entry in addition to an entry.
        env:
        - name: EXTERNAL_DNS_INFOBLOX_HTTP_POOL_CONNECTIONS
          value: "10" # (optional) Infoblox WAPI request connection pool size. The default is "10".
@@ -158,6 +159,7 @@ spec:
        - --infoblox-wapi-port=443 # (optional) Infoblox WAPI port. The default is "443".
        - --infoblox-wapi-version=2.3.1 # (optional) Infoblox WAPI version. The default is "2.3.1"
        - --infoblox-ssl-verify # (optional) Use --no-infoblox-ssl-verify to skip server certificate verification.
+        - --infoblox-create-ptr # (optional) Use --infoblox-create-ptr to create a PTR entry in addition to an entry.
        env:
        - name: EXTERNAL_DNS_INFOBLOX_HTTP_POOL_CONNECTIONS
          value: "10" # (optional) Infoblox WAPI request connection pool size. The default is "10".
@@ -268,3 +270,11 @@ There is also the ability to filter results from the Infoblox zone_auth service
```
--infoblox-fqdn-regex=^staging.*test.com$
```

## Infoblox PTR record support

There is an option to enable PTR record support for the Infoblox provider. PTR records allow reverse DNS lookups. To enable PTR record support, add the following to the external-dns arguments:
`--infoblox-create-ptr` to allow management of PTR records.
You can also add a filter for the reverse DNS zone to limit PTR records to specific zones only:
`--domain-filter=10.196.0.0/16` change this to the reverse zone(s) as defined in your Infoblox.
Now external-dns will manage PTR records for you.
@@ -2,6 +2,60 @@
This tutorial describes how to configure ExternalDNS to use the OpenShift Route source.
It is meant to supplement the other provider-specific setup tutorials.

### For OCP 4.x

In OCP 4.x, if you have multiple ingress controllers, you must specify an ingress controller name or a router name (you can get it from the route's Status.Ingress.RouterName field).
If you don't specify an ingress controller or router name when you have multiple ingress controllers in your environment, the route gets populated with multiple entries of router canonical hostnames. ExternalDNS would then create a CNAME record with multiple router canonical hostnames pointing to the route host, which violates RFC 1912 and is rejected by cloud providers, so record creation fails.
Once you specify the ingress controller or router name, external-dns matches it, selects the router canonical hostname corresponding to that routerName (present in the route's Status.Ingress.RouterName field), and creates a CNAME record for the route host pointing to that router canonical hostname.

Your ExternalDNS CR can be created as per the following example.
Replace the names in the domain section and the zone ID as per your environment.
This example is for an AWS environment.

```yaml
apiVersion: externaldns.olm.openshift.io/v1alpha1
kind: ExternalDNS
metadata:
  name: sample1
spec:
  domains:
    - filterType: Include
      matchType: Exact
      names: apps.miheer.externaldns
  provider:
    type: AWS
  source:
    hostnameAnnotation: Allow
    openshiftRouteOptions:
      routerName: default
    type: OpenShiftRoute
  zones:
    - Z05387772BD5723IZFRX3
```

This will create an external-dns pod in the external-dns namespace with the following container args under spec, where `- --source=openshift-route` and `- --openshift-router-name=default` are added by the external-dns-operator.

```
spec:
  containers:
  - args:
    - --domain-filter=apps.misalunk.externaldns
    - --metrics-address=127.0.0.1:7979
    - --txt-owner-id=external-dns-sample1
    - --provider=aws
    - --source=openshift-route
    - --policy=sync
    - --registry=txt
    - --log-level=debug
    - --zone-id-filter=Z05387772BD5723IZFRX3
    - --openshift-router-name=default
    - --txt-prefix=external-dns-
```

### For OCP 3.11 environment
### Prepare ROUTER_CANONICAL_HOSTNAME in default/router deployment
Read and go through [Finding the Host Name of the Router](https://docs.openshift.com/container-platform/3.11/install_config/router/default_haproxy_router.html#finding-router-hostname).
If no ROUTER_CANONICAL_HOSTNAME is set, you must annotate each route with external-dns.alpha.kubernetes.io/target!
@ -237,7 +237,7 @@ spec:
|
||||
```
|
||||
- Then, create a file called 'expose-apple-banana-app.yaml' to expose the services through an Ingress. For more information on deploying an ingress controller, refer to https://kubernetes.github.io/ingress-nginx/deploy/
```yaml
apiVersion: extensions/v1beta1
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: example-ingress
@ -252,8 +252,10 @@ spec:
paths:
- path: /apple
backend:
serviceName: example-service
servicePort: 5678
service:
name: example-service
port:
number: 5678
```
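For reference, the complete `networking.k8s.io/v1` form of this Ingress (a sketch assembled from the diff fragment above; values are the tutorial's examples, and `pathType` is required in the v1 API):

```yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: example-ingress
spec:
  rules:
  - http:
      paths:
      - path: /apple
        pathType: Prefix   # required in networking.k8s.io/v1
        backend:
          service:
            name: example-service
            port:
              number: 5678
```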
- Then, create the deployment and service:
```console
@ -298,7 +300,7 @@ $ kubectl delete -f external-dns.yaml
ports:
- port: 5678 # Default port for image
---
apiVersion: extensions/v1beta1
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: example-ingress
@ -313,8 +315,10 @@ $ kubectl delete -f external-dns.yaml
paths:
- path: /apple
backend:
serviceName: example-service
servicePort: 5678
service:
name: example-service
port:
number: 5678
```
- _Config File Example – Kubernetes cluster service from different cloud vendors_
```yaml
@ -434,7 +438,7 @@ $ kubectl delete -f external-dns.yaml
ports:
- port: 5680 # Default port for image
---
apiVersion: extensions/v1beta1
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: example-ingress
@ -449,10 +453,12 @@ $ kubectl delete -f external-dns.yaml
paths:
- path: /apple
backend:
serviceName: example-service
servicePort: 5678
service:
name: example-service
port:
number: 5678
---
apiVersion: extensions/v1beta1
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: example-ingress1
@ -467,10 +473,12 @@ $ kubectl delete -f external-dns.yaml
paths:
- path: /apple
backend:
serviceName: example-service1
servicePort: 5679
service:
name: example-service1
port:
number: 5679
---
apiVersion: extensions/v1beta1
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: example-ingress2
@ -485,8 +493,10 @@ $ kubectl delete -f external-dns.yaml
paths:
- path: /apple
backend:
serviceName: example-service2
servicePort: 5680
service:
name: example-service2
port:
number: 5680
```
- _Config File Example – Kubernetes cluster service from different cloud vendors_
```yaml
@ -572,6 +582,7 @@ $ kubectl delete -f external-dns.yaml
ports:
- port: 5679 # Default port for image
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: example-ingress
@ -586,10 +597,12 @@ $ kubectl delete -f external-dns.yaml
paths:
- path: /apple
backend:
serviceName: example-service
servicePort: 5678
service:
name: example-service
port:
number: 5678
---
apiVersion: extensions/v1beta1
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: example-ingress1
@ -604,8 +617,10 @@ $ kubectl delete -f external-dns.yaml
paths:
- path: /apple
backend:
serviceName: example-service1
servicePort: 5679
service:
name: example-service1
port:
number: 5679
```
- Then, create the deployment and service:
```console
@ -35,6 +35,8 @@ const (
|
||||
RecordTypeSRV = "SRV"
|
||||
// RecordTypeNS is a RecordType enum value
|
||||
RecordTypeNS = "NS"
|
||||
// RecordTypePTR is a RecordType enum value
|
||||
RecordTypePTR = "PTR"
|
||||
)
|
||||
|
||||
// TTL is a structure defining the TTL of a DNS record
|
||||
|
88
go.mod
88
go.mod
@ -3,79 +3,83 @@ module sigs.k8s.io/external-dns
|
||||
go 1.16
|
||||
|
||||
require (
|
||||
cloud.google.com/go v0.50.0
|
||||
cloud.google.com/go v0.97.0
|
||||
git.blindage.org/21h/hcloud-dns v0.0.0-20200807003420-f768ffe03f8d
|
||||
github.com/Azure/azure-sdk-for-go v45.1.0+incompatible
|
||||
github.com/Azure/go-autorest/autorest v0.11.10
|
||||
github.com/Azure/go-autorest/autorest/adal v0.9.5
|
||||
github.com/Azure/azure-sdk-for-go v46.4.0+incompatible
|
||||
github.com/Azure/go-autorest/autorest v0.11.21
|
||||
github.com/Azure/go-autorest/autorest/adal v0.9.16
|
||||
github.com/Azure/go-autorest/autorest/to v0.4.0
|
||||
github.com/akamai/AkamaiOPEN-edgegrid-golang v1.0.0
|
||||
github.com/akamai/AkamaiOPEN-edgegrid-golang v1.1.1
|
||||
github.com/StackExchange/dnscontrol v0.2.8
|
||||
github.com/alecthomas/assert v0.0.0-20170929043011-405dbfeb8e38 // indirect
|
||||
github.com/alecthomas/colour v0.1.0 // indirect
|
||||
github.com/alecthomas/kingpin v2.2.5+incompatible
|
||||
github.com/alecthomas/repr v0.0.0-20200325044227-4184120f674c // indirect
|
||||
github.com/aliyun/alibaba-cloud-sdk-go v1.61.357
|
||||
github.com/aws/aws-sdk-go v1.31.4
|
||||
github.com/aws/aws-sdk-go v1.40.53
|
||||
github.com/bodgit/tsig v0.0.2
|
||||
github.com/cloudflare/cloudflare-go v0.10.1
|
||||
github.com/cloudflare/cloudflare-go v0.13.2
|
||||
github.com/cloudfoundry-community/go-cfclient v0.0.0-20190201205600-f136f9222381
|
||||
github.com/datawire/ambassador v1.6.0
|
||||
github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba
|
||||
github.com/digitalocean/godo v1.36.0
|
||||
github.com/digitalocean/godo v1.69.1
|
||||
github.com/dnsimple/dnsimple-go v0.60.0
|
||||
github.com/exoscale/egoscale v0.18.1
|
||||
github.com/fatih/structs v1.1.0 // indirect
|
||||
github.com/exoscale/egoscale v0.73.2
|
||||
github.com/ffledgling/pdns-go v0.0.0-20180219074714-524e7daccd99
|
||||
github.com/go-gandi/go-gandi v0.0.0-20200921091836-0d8a64b9cc09
|
||||
github.com/go-logr/logr v1.1.0 // indirect
|
||||
github.com/golang/sync v0.0.0-20180314180146-1d60e4601c6f
|
||||
github.com/google/go-cmp v0.5.2
|
||||
github.com/gophercloud/gophercloud v0.1.0
|
||||
github.com/gorilla/mux v1.7.4 // indirect
|
||||
github.com/hooklift/gowsdl v0.4.0
|
||||
github.com/google/go-cmp v0.5.6
|
||||
github.com/gophercloud/gophercloud v0.21.0
|
||||
github.com/hooklift/gowsdl v0.5.0
|
||||
github.com/infobloxopen/infoblox-go-client v1.1.1
|
||||
github.com/linki/instrumented_http v0.2.0
|
||||
github.com/linode/linodego v0.19.0
|
||||
github.com/maxatome/go-testdeep v1.4.0
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/linki/instrumented_http v0.3.0
|
||||
github.com/linode/linodego v0.32.2
|
||||
github.com/mattn/go-isatty v0.0.14 // indirect
|
||||
github.com/maxatome/go-testdeep v1.10.1
|
||||
github.com/miekg/dns v1.1.36-0.20210109083720-731b191cabd1
|
||||
github.com/mitchellh/mapstructure v1.4.1 // indirect
|
||||
github.com/nesv/go-dynect v0.6.0
|
||||
github.com/nic-at/rc0go v1.1.1
|
||||
github.com/onsi/gomega v1.14.0 // indirect
|
||||
github.com/openshift/api v0.0.0-20200605231317-fb2a6ca106ae
|
||||
github.com/openshift/client-go v0.0.0-20200608144219-584632b8fc73
|
||||
github.com/oracle/oci-go-sdk v21.4.0+incompatible
|
||||
github.com/ovh/go-ovh v0.0.0-20181109152953-ba5adb4cf014
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/projectcontour/contour v1.5.0
|
||||
github.com/prometheus/client_golang v1.7.1
|
||||
github.com/projectcontour/contour v1.18.1
|
||||
github.com/prometheus/client_golang v1.11.0
|
||||
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.7.0.20210127161313-bd30bebeac4f
|
||||
github.com/sirupsen/logrus v1.6.0
|
||||
github.com/sirupsen/logrus v1.8.1
|
||||
github.com/smartystreets/gunit v1.3.4 // indirect
|
||||
github.com/stretchr/testify v1.6.1
|
||||
github.com/stretchr/testify v1.7.0
|
||||
github.com/terra-farm/udnssdk v1.3.5 // indirect
|
||||
github.com/transip/gotransip/v6 v6.6.0
|
||||
github.com/transip/gotransip/v6 v6.6.2
|
||||
github.com/ultradns/ultradns-sdk-go v0.0.0-20200616202852-e62052662f60
|
||||
github.com/vinyldns/go-vinyldns v0.0.0-20200211145900-fe8a3d82e556
|
||||
github.com/vultr/govultr/v2 v2.5.1
|
||||
go.etcd.io/etcd v0.5.0-alpha.5.0.20200401174654-e694b7bb0875
|
||||
go.uber.org/ratelimit v0.1.0
|
||||
golang.org/x/net v0.0.0-20201224014010-6772e930b67b
|
||||
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d
|
||||
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208
|
||||
golang.org/x/tools v0.0.0-20200708003708-134513de8882 // indirect
|
||||
google.golang.org/api v0.15.0
|
||||
github.com/vultr/govultr/v2 v2.9.0
|
||||
go.etcd.io/etcd/api/v3 v3.5.0
|
||||
go.etcd.io/etcd/client/v3 v3.5.0
|
||||
go.uber.org/ratelimit v0.2.0
|
||||
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97 // indirect
|
||||
golang.org/x/net v0.0.0-20210928044308-7d9f5e0b762b
|
||||
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
|
||||
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6 // indirect
|
||||
golang.org/x/text v0.3.7 // indirect
|
||||
google.golang.org/api v0.58.0
|
||||
gopkg.in/ini.v1 v1.62.0 // indirect
|
||||
gopkg.in/ns1/ns1-go.v2 v2.0.0-20190322154155-0dafb5275fd1
|
||||
gopkg.in/yaml.v2 v2.3.0
|
||||
honnef.co/go/tools v0.0.1-2020.1.4 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0
|
||||
istio.io/api v0.0.0-20210128181506-0c4b8e54850f
|
||||
istio.io/client-go v0.0.0-20210128182905-ee2edd059e02
|
||||
k8s.io/api v0.18.8
|
||||
k8s.io/apimachinery v0.18.8
|
||||
k8s.io/client-go v0.18.8
|
||||
k8s.io/kubernetes v1.13.0
|
||||
k8s.io/api v0.22.2
|
||||
k8s.io/apimachinery v0.22.2
|
||||
k8s.io/client-go v0.22.2
|
||||
k8s.io/klog/v2 v2.20.0 // indirect
|
||||
k8s.io/utils v0.0.0-20210820185131-d34e5cb4466e // indirect
|
||||
sigs.k8s.io/yaml v1.3.0 // indirect
|
||||
)
|
||||
|
||||
replace (
|
||||
github.com/golang/glog => github.com/kubermatic/glog-logrus v0.0.0-20180829085450-3fa5b9870d1d
|
||||
// TODO(jpg): Pin gRPC to work around breaking change until all dependences are upgraded: https://github.com/etcd-io/etcd/issues/11563
|
||||
google.golang.org/grpc => google.golang.org/grpc v1.26.0
|
||||
k8s.io/klog => github.com/mikkeloscar/knolog v0.0.0-20190326191552-80742771eb6b
|
||||
)
|
||||
replace k8s.io/klog/v2 => github.com/Raffo/knolog v0.0.0-20211016155154-e4d5e0cc970a
|
||||
|
@ -9,6 +9,9 @@ rules:
|
||||
- apiGroups: ['extensions']
|
||||
resources: ['ingresses']
|
||||
verbs: ['get', 'watch', 'list']
|
||||
- apiGroups: ['']
|
||||
resources: ['nodes']
|
||||
verbs: ['list']
|
||||
- apiGroups: ["networking.k8s.io"]
|
||||
resources: ["ingresses"]
|
||||
verbs: ["get","watch","list"]
|
||||
- apiGroups: [""]
|
||||
resources: ["nodes"]
|
||||
verbs: ["watch", "list"]
|
||||
|
@ -3,7 +3,7 @@ kind: Kustomization
|
||||
|
||||
images:
|
||||
- name: k8s.gcr.io/external-dns/external-dns
|
||||
newTag: v0.8.0
|
||||
newTag: v0.10.0
|
||||
|
||||
resources:
|
||||
- ./external-dns-deployment.yaml
|
||||
|
10
main.go
10
main.go
@ -27,6 +27,7 @@ import (
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
_ "k8s.io/client-go/plugin/pkg/client/auth"
|
||||
|
||||
"sigs.k8s.io/external-dns/controller"
|
||||
@ -100,11 +101,14 @@ func main() {
|
||||
go serveMetrics(cfg.MetricsAddress)
|
||||
go handleSigterm(cancel)
|
||||
|
||||
// error is explicitly ignored because the filter is already validated in validation.ValidateConfig
|
||||
labelSelector, _ := labels.Parse(cfg.LabelFilter)
|
||||
|
||||
// Create a source.Config from the flags passed by the user.
|
||||
sourceCfg := &source.Config{
|
||||
Namespace: cfg.Namespace,
|
||||
AnnotationFilter: cfg.AnnotationFilter,
|
||||
LabelFilter: cfg.LabelFilter,
|
||||
LabelFilter: labelSelector,
|
||||
FQDNTemplate: cfg.FQDNTemplate,
|
||||
CombineFQDNAndAnnotation: cfg.CombineFQDNAndAnnotation,
|
||||
IgnoreHostnameAnnotation: cfg.IgnoreHostnameAnnotation,
|
||||
@ -128,6 +132,7 @@ func main() {
|
||||
SkipperRouteGroupVersion: cfg.SkipperRouteGroupVersion,
|
||||
RequestTimeout: cfg.RequestTimeout,
|
||||
DefaultTargets: cfg.DefaultTargets,
|
||||
OCPRouterName: cfg.OCPRouterName,
|
||||
}
|
||||
|
||||
// Lookup all the selected sources by names and pass them the desired configuration.
|
||||
@ -219,7 +224,7 @@ func main() {
|
||||
case "rcodezero":
|
||||
p, err = rcode0.NewRcodeZeroProvider(domainFilter, cfg.DryRun, cfg.RcodezeroTXTEncrypt)
|
||||
case "google":
|
||||
p, err = google.NewGoogleProvider(ctx, cfg.GoogleProject, domainFilter, zoneIDFilter, cfg.GoogleBatchChangeSize, cfg.GoogleBatchChangeInterval, cfg.DryRun)
|
||||
p, err = google.NewGoogleProvider(ctx, cfg.GoogleProject, domainFilter, zoneIDFilter, cfg.GoogleBatchChangeSize, cfg.GoogleBatchChangeInterval, cfg.GoogleZoneVisibility, cfg.DryRun)
|
||||
case "digitalocean":
|
||||
p, err = digitalocean.NewDigitalOceanProvider(ctx, domainFilter, cfg.DryRun, cfg.DigitalOceanAPIPageSize)
|
||||
case "hetzner":
|
||||
@ -245,6 +250,7 @@ func main() {
|
||||
MaxResults: cfg.InfobloxMaxResults,
|
||||
DryRun: cfg.DryRun,
|
||||
FQDNRexEx: cfg.InfobloxFQDNRegEx,
|
||||
CreatePTR: cfg.InfobloxCreatePTR,
|
||||
},
|
||||
)
|
||||
case "dyn":
|
||||
|
@ -23,6 +23,8 @@ import (
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
|
||||
"sigs.k8s.io/external-dns/endpoint"
|
||||
|
||||
"github.com/alecthomas/kingpin"
|
||||
@ -67,6 +69,7 @@ type Config struct {
|
||||
GoogleProject string
|
||||
GoogleBatchChangeSize int
|
||||
GoogleBatchChangeInterval time.Duration
|
||||
GoogleZoneVisibility string
|
||||
DomainFilter []string
|
||||
ExcludeDomains []string
|
||||
RegexDomainFilter *regexp.Regexp
|
||||
@ -108,6 +111,7 @@ type Config struct {
|
||||
InfobloxView string
|
||||
InfobloxMaxResults int
|
||||
InfobloxFQDNRegEx string
|
||||
InfobloxCreatePTR bool
|
||||
DynCustomerName string
|
||||
DynUsername string
|
||||
DynPassword string `secure:"yes"`
|
||||
@ -173,6 +177,7 @@ type Config struct {
|
||||
GoDaddySecretKey string `secure:"yes"`
|
||||
GoDaddyTTL int64
|
||||
GoDaddyOTE bool
|
||||
OCPRouterName string
|
||||
}
|
||||
|
||||
var defaultConfig = &Config{
|
||||
@ -186,7 +191,7 @@ var defaultConfig = &Config{
|
||||
Sources: nil,
|
||||
Namespace: "",
|
||||
AnnotationFilter: "",
|
||||
LabelFilter: "",
|
||||
LabelFilter: labels.Everything().String(),
|
||||
FQDNTemplate: "",
|
||||
CombineFQDNAndAnnotation: false,
|
||||
IgnoreHostnameAnnotation: false,
|
||||
@ -200,6 +205,7 @@ var defaultConfig = &Config{
|
||||
GoogleProject: "",
|
||||
GoogleBatchChangeSize: 1000,
|
||||
GoogleBatchChangeInterval: time.Second,
|
||||
GoogleZoneVisibility: "",
|
||||
DomainFilter: []string{},
|
||||
ExcludeDomains: []string{},
|
||||
RegexDomainFilter: regexp.MustCompile(""),
|
||||
@ -237,6 +243,7 @@ var defaultConfig = &Config{
|
||||
InfobloxView: "",
|
||||
InfobloxMaxResults: 0,
|
||||
InfobloxFQDNRegEx: "",
|
||||
InfobloxCreatePTR: false,
|
||||
OCIConfigFile: "/etc/kubernetes/oci.yaml",
|
||||
InMemoryZones: []string{},
|
||||
OVHEndpoint: "ovh-eu",
|
||||
@ -357,11 +364,12 @@ func (cfg *Config) ParseFlags(args []string) error {
|
||||
// Flags related to Skipper RouteGroup
|
||||
app.Flag("skipper-routegroup-groupversion", "The resource version for skipper routegroup").Default(source.DefaultRoutegroupVersion).StringVar(&cfg.SkipperRouteGroupVersion)
|
||||
|
||||
// Flags related to processing sources
|
||||
// Flags related to processing source
|
||||
app.Flag("source", "The resource types that are queried for endpoints; specify multiple times for multiple sources (required, options: service, ingress, node, fake, connector, istio-gateway, istio-virtualservice, cloudfoundry, contour-ingressroute, contour-httpproxy, gloo-proxy, crd, empty, skipper-routegroup, openshift-route, ambassador-host, kong-tcpingress)").Required().PlaceHolder("source").EnumsVar(&cfg.Sources, "service", "ingress", "node", "pod", "istio-gateway", "istio-virtualservice", "cloudfoundry", "contour-ingressroute", "contour-httpproxy", "gloo-proxy", "fake", "connector", "crd", "empty", "skipper-routegroup", "openshift-route", "ambassador-host", "kong-tcpingress")
|
||||
app.Flag("openshift-router-name", "if source is openshift-route then you can pass the ingress controller name. Based on this name external-dns will select the respective router from the route status and map that routerCanonicalHostname to the route host while creating a CNAME record.").StringVar(&cfg.OCPRouterName)
|
||||
app.Flag("namespace", "Limit sources of endpoints to a specific namespace (default: all namespaces)").Default(defaultConfig.Namespace).StringVar(&cfg.Namespace)
|
||||
app.Flag("annotation-filter", "Filter sources managed by external-dns via annotation using label selector semantics (default: all sources)").Default(defaultConfig.AnnotationFilter).StringVar(&cfg.AnnotationFilter)
|
||||
app.Flag("label-filter", "Filter sources managed by external-dns via label selector when listing all resources; currently only supported by source CRD").Default(defaultConfig.LabelFilter).StringVar(&cfg.LabelFilter)
|
||||
app.Flag("label-filter", "Filter sources managed by external-dns via label selector when listing all resources; currently supported by source types CRD, ingress, service and openshift-route").Default(defaultConfig.LabelFilter).StringVar(&cfg.LabelFilter)
|
||||
app.Flag("fqdn-template", "A templated string that's used to generate DNS names from sources that don't define a hostname themselves, or to add a hostname suffix when paired with the fake source (optional). Accepts comma separated list for multiple global FQDN.").Default(defaultConfig.FQDNTemplate).StringVar(&cfg.FQDNTemplate)
|
||||
app.Flag("combine-fqdn-annotation", "Combine FQDN template and Annotations instead of overwriting").BoolVar(&cfg.CombineFQDNAndAnnotation)
|
||||
app.Flag("ignore-hostname-annotation", "Ignore hostname annotation when generating DNS names, valid only when using fqdn-template is set (optional, default: false)").BoolVar(&cfg.IgnoreHostnameAnnotation)
|
||||
@ -389,6 +397,7 @@ func (cfg *Config) ParseFlags(args []string) error {
|
||||
app.Flag("google-project", "When using the Google provider, current project is auto-detected, when running on GCP. Specify other project with this. Must be specified when running outside GCP.").Default(defaultConfig.GoogleProject).StringVar(&cfg.GoogleProject)
|
||||
app.Flag("google-batch-change-size", "When using the Google provider, set the maximum number of changes that will be applied in each batch.").Default(strconv.Itoa(defaultConfig.GoogleBatchChangeSize)).IntVar(&cfg.GoogleBatchChangeSize)
|
||||
app.Flag("google-batch-change-interval", "When using the Google provider, set the interval between batch changes.").Default(defaultConfig.GoogleBatchChangeInterval.String()).DurationVar(&cfg.GoogleBatchChangeInterval)
|
||||
app.Flag("google-zone-visibility", "When using the Google provider, filter for zones with this visibility (optional, options: public, private)").Default(defaultConfig.GoogleZoneVisibility).EnumVar(&cfg.GoogleZoneVisibility, "", "public", "private")
|
||||
app.Flag("alibaba-cloud-config-file", "When using the Alibaba Cloud provider, specify the Alibaba Cloud configuration file (required when --provider=alibabacloud").Default(defaultConfig.AlibabaCloudConfigFile).StringVar(&cfg.AlibabaCloudConfigFile)
|
||||
app.Flag("alibaba-cloud-zone-type", "When using the Alibaba Cloud provider, filter for zones of this type (optional, options: public, private)").Default(defaultConfig.AlibabaCloudZoneType).EnumVar(&cfg.AlibabaCloudZoneType, "", "public", "private")
|
||||
app.Flag("aws-zone-type", "When using the AWS provider, filter for zones of this type (optional, options: public, private)").Default(defaultConfig.AWSZoneType).EnumVar(&cfg.AWSZoneType, "", "public", "private")
|
||||
@ -423,6 +432,7 @@ func (cfg *Config) ParseFlags(args []string) error {
|
||||
app.Flag("infoblox-view", "DNS view (default: \"\")").Default(defaultConfig.InfobloxView).StringVar(&cfg.InfobloxView)
|
||||
app.Flag("infoblox-max-results", "Add _max_results as query parameter to the URL on all API requests. The default is 0 which means _max_results is not set and the default of the server is used.").Default(strconv.Itoa(defaultConfig.InfobloxMaxResults)).IntVar(&cfg.InfobloxMaxResults)
|
||||
app.Flag("infoblox-fqdn-regex", "Apply this regular expression as a filter for obtaining zone_auth objects. This is disabled by default.").Default(defaultConfig.InfobloxFQDNRegEx).StringVar(&cfg.InfobloxFQDNRegEx)
|
||||
app.Flag("infoblox-create-ptr", "When using the Infoblox provider, create a ptr entry in addition to an entry").Default(strconv.FormatBool(defaultConfig.InfobloxCreatePTR)).BoolVar(&cfg.InfobloxCreatePTR)
|
||||
app.Flag("dyn-customer-name", "When using the Dyn provider, specify the Customer Name").Default("").StringVar(&cfg.DynCustomerName)
|
||||
app.Flag("dyn-username", "When using the Dyn provider, specify the Username").Default("").StringVar(&cfg.DynUsername)
|
||||
app.Flag("dyn-password", "When using the Dyn provider, specify the password").Default("").StringVar(&cfg.DynPassword)
|
||||
|
@ -46,6 +46,7 @@ var (
|
||||
GoogleProject: "",
|
||||
GoogleBatchChangeSize: 1000,
|
||||
GoogleBatchChangeInterval: time.Second,
|
||||
GoogleZoneVisibility: "",
|
||||
DomainFilter: []string{""},
|
||||
ExcludeDomains: []string{""},
|
||||
RegexDomainFilter: regexp.MustCompile(""),
|
||||
@ -114,6 +115,7 @@ var (
|
||||
DigitalOceanAPIPageSize: 50,
|
||||
ManagedDNSRecordTypes: []string{endpoint.RecordTypeA, endpoint.RecordTypeCNAME},
|
||||
RFC2136BatchChangeSize: 50,
|
||||
OCPRouterName: "default",
|
||||
}
|
||||
|
||||
overriddenConfig = &Config{
|
||||
@ -134,6 +136,7 @@ var (
|
||||
GoogleProject: "project",
|
||||
GoogleBatchChangeSize: 100,
|
||||
GoogleBatchChangeInterval: time.Second * 2,
|
||||
GoogleZoneVisibility: "private",
|
||||
DomainFilter: []string{"example.org", "company.com"},
|
||||
ExcludeDomains: []string{"xapi.example.org", "xapi.company.com"},
|
||||
RegexDomainFilter: regexp.MustCompile("(example\\.org|company\\.com)$"),
|
||||
@ -223,6 +226,7 @@ func TestParseFlags(t *testing.T) {
|
||||
args: []string{
|
||||
"--source=service",
|
||||
"--provider=google",
|
||||
"--openshift-router-name=default",
|
||||
},
|
||||
envVars: map[string]string{},
|
||||
expected: minimalConfig,
|
||||
@ -249,6 +253,7 @@ func TestParseFlags(t *testing.T) {
|
||||
"--google-project=project",
|
||||
"--google-batch-change-size=100",
|
||||
"--google-batch-change-interval=2s",
|
||||
"--google-zone-visibility=private",
|
||||
"--azure-config-file=azure.json",
|
||||
"--azure-resource-group=arg",
|
||||
"--azure-subscription-id=arg",
|
||||
@ -351,6 +356,7 @@ func TestParseFlags(t *testing.T) {
|
||||
"EXTERNAL_DNS_GOOGLE_PROJECT": "project",
|
||||
"EXTERNAL_DNS_GOOGLE_BATCH_CHANGE_SIZE": "100",
|
||||
"EXTERNAL_DNS_GOOGLE_BATCH_CHANGE_INTERVAL": "2s",
|
||||
"EXTERNAL_DNS_GOOGLE_ZONE_VISIBILITY": "private",
|
||||
"EXTERNAL_DNS_AZURE_CONFIG_FILE": "azure.json",
|
||||
"EXTERNAL_DNS_AZURE_RESOURCE_GROUP": "arg",
|
||||
"EXTERNAL_DNS_AZURE_SUBSCRIPTION_ID": "arg",
|
||||
|
@ -20,6 +20,8 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
|
||||
"sigs.k8s.io/external-dns/pkg/apis/externaldns"
|
||||
)
|
||||
|
||||
@ -110,5 +112,9 @@ func ValidateConfig(cfg *externaldns.Config) error {
|
||||
return errors.New("txt-prefix and txt-suffix are mutual exclusive")
|
||||
}
|
||||
|
||||
_, err := labels.Parse(cfg.LabelFilter)
|
||||
if err != nil {
|
||||
return errors.New("--label-filter does not specify a valid label selector")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -414,14 +414,9 @@ func (p *AWSProvider) doRecords(ctx context.Context, action string, endpoints []
|
||||
return errors.Wrapf(err, "failed to list zones, aborting %s doRecords action", action)
|
||||
}
|
||||
|
||||
records, err := p.records(ctx, zones)
|
||||
if err != nil {
|
||||
log.Errorf("failed to list records while preparing %s doRecords action: %s", action, err)
|
||||
}
|
||||
|
||||
p.AdjustEndpoints(endpoints)
|
||||
|
||||
return p.submitChanges(ctx, p.newChanges(action, endpoints, records, zones), zones)
|
||||
return p.submitChanges(ctx, p.newChanges(action, endpoints), zones)
|
||||
}
|
||||
|
||||
// UpdateRecords updates a given set of old records to a new set of records in a given hosted zone.
|
||||
@ -431,15 +426,10 @@ func (p *AWSProvider) UpdateRecords(ctx context.Context, updates, current []*end
|
||||
return errors.Wrapf(err, "failed to list zones, aborting UpdateRecords")
|
||||
}
|
||||
|
||||
records, err := p.records(ctx, zones)
|
||||
if err != nil {
|
||||
log.Errorf("failed to list records while preparing UpdateRecords: %s", err)
|
||||
}
|
||||
|
||||
return p.submitChanges(ctx, p.createUpdateChanges(updates, current, records, zones), zones)
|
||||
return p.submitChanges(ctx, p.createUpdateChanges(updates, current), zones)
|
||||
}
|
||||
|
||||
func (p *AWSProvider) createUpdateChanges(newEndpoints, oldEndpoints []*endpoint.Endpoint, recordsCache []*endpoint.Endpoint, zones map[string]*route53.HostedZone) []*route53.Change {
|
||||
func (p *AWSProvider) createUpdateChanges(newEndpoints, oldEndpoints []*endpoint.Endpoint) []*route53.Change {
|
||||
var deletes []*endpoint.Endpoint
|
||||
var creates []*endpoint.Endpoint
|
||||
var updates []*endpoint.Endpoint
|
||||
@ -459,9 +449,9 @@ func (p *AWSProvider) createUpdateChanges(newEndpoints, oldEndpoints []*endpoint
|
||||
}
|
||||
|
||||
combined := make([]*route53.Change, 0, len(deletes)+len(creates)+len(updates))
|
||||
combined = append(combined, p.newChanges(route53.ChangeActionCreate, creates, recordsCache, zones)...)
|
||||
combined = append(combined, p.newChanges(route53.ChangeActionUpsert, updates, recordsCache, zones)...)
|
||||
combined = append(combined, p.newChanges(route53.ChangeActionDelete, deletes, recordsCache, zones)...)
|
||||
combined = append(combined, p.newChanges(route53.ChangeActionCreate, creates)...)
|
||||
combined = append(combined, p.newChanges(route53.ChangeActionUpsert, updates)...)
|
||||
combined = append(combined, p.newChanges(route53.ChangeActionDelete, deletes)...)
|
||||
return combined
|
||||
}
|
||||
|
||||
@ -487,20 +477,11 @@ func (p *AWSProvider) ApplyChanges(ctx context.Context, changes *plan.Changes) e
|
||||
return errors.Wrap(err, "failed to list zones, not applying changes")
|
||||
}
|
||||
|
||||
records, ok := ctx.Value(provider.RecordsContextKey).([]*endpoint.Endpoint)
|
||||
if !ok {
|
||||
var err error
|
||||
records, err = p.records(ctx, zones)
|
||||
if err != nil {
|
||||
log.Errorf("failed to get records while preparing to applying changes: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
updateChanges := p.createUpdateChanges(changes.UpdateNew, changes.UpdateOld, records, zones)
|
||||
updateChanges := p.createUpdateChanges(changes.UpdateNew, changes.UpdateOld)
|
||||
|
||||
combinedChanges := make([]*route53.Change, 0, len(changes.Delete)+len(changes.Create)+len(updateChanges))
|
||||
combinedChanges = append(combinedChanges, p.newChanges(route53.ChangeActionCreate, changes.Create, records, zones)...)
|
||||
combinedChanges = append(combinedChanges, p.newChanges(route53.ChangeActionDelete, changes.Delete, records, zones)...)
|
||||
combinedChanges = append(combinedChanges, p.newChanges(route53.ChangeActionCreate, changes.Create)...)
|
||||
combinedChanges = append(combinedChanges, p.newChanges(route53.ChangeActionDelete, changes.Delete)...)
|
||||
combinedChanges = append(combinedChanges, updateChanges...)
|
||||
|
||||
return p.submitChanges(ctx, combinedChanges, zones)
|
||||
@ -567,11 +548,11 @@ func (p *AWSProvider) submitChanges(ctx context.Context, changes []*route53.Chan
|
||||
}
|
||||
|
||||
// newChanges returns a collection of Changes based on the given records and action.
|
||||
func (p *AWSProvider) newChanges(action string, endpoints []*endpoint.Endpoint, recordsCache []*endpoint.Endpoint, zones map[string]*route53.HostedZone) []*route53.Change {
|
||||
func (p *AWSProvider) newChanges(action string, endpoints []*endpoint.Endpoint) []*route53.Change {
|
||||
changes := make([]*route53.Change, 0, len(endpoints))
|
||||
|
||||
for _, endpoint := range endpoints {
|
||||
change, dualstack := p.newChange(action, endpoint, recordsCache, zones)
|
||||
change, dualstack := p.newChange(action, endpoint)
|
||||
changes = append(changes, change)
|
||||
if dualstack {
|
||||
// make a copy of change, modify RRS type to AAAA, then add new change
|
||||
@ -619,7 +600,7 @@ func (p *AWSProvider) AdjustEndpoints(endpoints []*endpoint.Endpoint) []*endpoin
|
||||
// returned Change is based on the given record by the given action, e.g.
|
||||
// action=ChangeActionCreate returns a change for creation of the record and
|
||||
// action=ChangeActionDelete returns a change for deletion of the record.
|
||||
func (p *AWSProvider) newChange(action string, ep *endpoint.Endpoint, recordsCache []*endpoint.Endpoint, zones map[string]*route53.HostedZone) (*route53.Change, bool) {
|
||||
func (p *AWSProvider) newChange(action string, ep *endpoint.Endpoint) (*route53.Change, bool) {
|
||||
change := &route53.Change{
|
||||
Action: aws.String(action),
|
||||
ResourceRecordSet: &route53.ResourceRecordSet{
|
||||
|
@ -501,7 +501,7 @@ func TestAWSApplyChanges(t *testing.T) {
|
||||
setup func(p *AWSProvider) context.Context
|
||||
listRRSets int
|
||||
}{
|
||||
{"no cache", func(p *AWSProvider) context.Context { return context.Background() }, 3},
|
||||
{"no cache", func(p *AWSProvider) context.Context { return context.Background() }, 0},
|
||||
{"cached", func(p *AWSProvider) context.Context {
|
||||
ctx := context.Background()
|
||||
records, err := p.Records(ctx)
|
||||
@ -781,7 +781,7 @@ func TestAWSsubmitChanges(t *testing.T) {
|
||||
zones, _ := provider.Zones(ctx)
|
||||
records, _ := provider.Records(ctx)
|
||||
cs := make([]*route53.Change, 0, len(endpoints))
|
||||
cs = append(cs, provider.newChanges(route53.ChangeActionCreate, endpoints, records, zones)...)
|
||||
cs = append(cs, provider.newChanges(route53.ChangeActionCreate, endpoints)...)
|
||||
|
||||
require.NoError(t, provider.submitChanges(ctx, cs, zones))
|
||||
|
||||
@ -798,11 +798,9 @@ func TestAWSsubmitChangesError(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
zones, err := provider.Zones(ctx)
|
||||
require.NoError(t, err)
|
||||
records, err := provider.Records(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
ep := endpoint.NewEndpointWithTTL("fail.zone-1.ext-dns-test-2.teapot.zalan.do", endpoint.RecordTypeA, endpoint.TTL(recordTTL), "1.0.0.1")
|
||||
cs := provider.newChanges(route53.ChangeActionCreate, []*endpoint.Endpoint{ep}, records, zones)
|
||||
cs := provider.newChanges(route53.ChangeActionCreate, []*endpoint.Endpoint{ep})
|
||||
|
||||
require.Error(t, provider.submitChanges(ctx, cs, zones))
|
||||
}
|
||||
|
@ -18,12 +18,10 @@ package azure
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns"
|
||||
"github.com/Azure/go-autorest/autorest"
|
||||
"github.com/Azure/go-autorest/autorest/azure"
|
||||
"github.com/Azure/go-autorest/autorest/to"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
@ -446,59 +444,6 @@ func testAzureApplyChangesInternal(t *testing.T, dryRun bool, client RecordSetsC
|
||||
}
|
||||
}
|
||||
|
||||
func TestAzureGetAccessToken(t *testing.T) {
|
||||
env := azure.PublicCloud
|
||||
cfg := config{
|
||||
ClientID: "",
|
||||
ClientSecret: "",
|
||||
TenantID: "",
|
||||
UseManagedIdentityExtension: false,
|
||||
}
|
||||
|
||||
_, err := getAccessToken(cfg, env)
|
||||
if err == nil {
|
||||
t.Fatalf("expected to fail, but got no error")
|
||||
}
|
||||
|
||||
// Expect to use managed identity in this case
|
||||
cfg = config{
|
||||
ClientID: "msi",
|
||||
ClientSecret: "msi",
|
||||
TenantID: "cefe8aef-5127-4d65-a299-012053f81f60",
|
||||
UserAssignedIdentityID: "userAssignedIdentityClientID",
|
||||
UseManagedIdentityExtension: true,
|
||||
}
|
||||
token, err := getAccessToken(cfg, env)
|
||||
if err != nil {
|
||||
t.Fatalf("expected to construct a token successfully, but got error %v", err)
|
||||
}
|
||||
_, err = token.MarshalJSON()
|
||||
if err == nil ||
|
||||
!strings.Contains(err.Error(), "marshalling ServicePrincipalMSISecret is not supported") {
|
||||
t.Fatalf("expected to fail to marshal token, but got %v", err)
|
||||
}
|
||||
|
||||
// Expect to use SPN in this case
|
||||
cfg = config{
|
||||
ClientID: "SPNClientID",
|
||||
ClientSecret: "SPNSecret",
|
||||
TenantID: "cefe8aef-5127-4d65-a299-012053f81f60",
|
||||
UserAssignedIdentityID: "userAssignedIdentityClientID",
|
||||
UseManagedIdentityExtension: true,
|
||||
}
|
||||
token, err = getAccessToken(cfg, env)
|
||||
if err != nil {
|
||||
t.Fatalf("expected to construct a token successfully, but got error %v", err)
|
||||
}
|
||||
innerToken, err := token.MarshalJSON()
|
||||
if err != nil {
|
||||
t.Fatalf("expected to marshal token successfully, but got error %v", err)
|
||||
}
|
||||
if !strings.Contains(string(innerToken), "SPNClientID") {
|
||||
t.Fatalf("expect the clientID of the token is SPNClientID, but got token %s", string(innerToken))
|
||||
}
|
||||
}
|
||||
|
||||
func TestAzureNameFilter(t *testing.T) {
|
||||
provider, err := newMockedAzureProvider(endpoint.NewDomainFilter([]string{"nginx.example.com"}), endpoint.NewDomainFilter([]string{"example.com"}), provider.NewZoneIDFilter([]string{""}), true, "k8s", "",
|
||||
&[]dns.Zone{
|
||||
|
@ -103,14 +103,13 @@ func getAccessToken(cfg config, environment azure.Environment) (*adal.ServicePri
|
||||
// Try to retrieve token with MSI.
|
||||
if cfg.UseManagedIdentityExtension {
|
||||
log.Info("Using managed identity extension to retrieve access token for Azure API.")
|
||||
msiEndpoint, err := adal.GetMSIVMEndpoint()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get the managed service identity endpoint: %v", err)
|
||||
}
|
||||
|
||||
if cfg.UserAssignedIdentityID != "" {
|
||||
log.Infof("Resolving to user assigned identity, client id is %s.", cfg.UserAssignedIdentityID)
|
||||
token, err := adal.NewServicePrincipalTokenFromMSIWithUserAssignedID(msiEndpoint, environment.ServiceManagementEndpoint, cfg.UserAssignedIdentityID)
|
||||
token, err := adal.NewServicePrincipalTokenFromManagedIdentity(environment.ServiceManagementEndpoint, &adal.ManagedIdentityOptions{
|
||||
ClientID: cfg.UserAssignedIdentityID,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create the managed service identity token: %v", err)
|
||||
}
|
||||
@ -118,7 +117,7 @@ func getAccessToken(cfg config, environment azure.Environment) (*adal.ServicePri
|
||||
}
|
||||
|
||||
log.Info("Resolving to system assigned identity.")
|
||||
token, err := adal.NewServicePrincipalTokenFromMSI(msiEndpoint, environment.ServiceManagementEndpoint)
|
||||
token, err := adal.NewServicePrincipalTokenFromManagedIdentity(environment.ServiceManagementEndpoint, nil)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create the managed service identity token: %v", err)
|
||||
}
|
||||
|
@ -308,7 +308,7 @@ func (p *CloudFlareProvider) submitChanges(ctx context.Context, changes []*cloud
|
||||
}
|
||||
err := p.Client.UpdateDNSRecord(zoneID, recordID, change.ResourceRecord)
|
||||
if err != nil {
|
||||
log.WithFields(logFields).Errorf("failed to delete record: %v", err)
|
||||
log.WithFields(logFields).Errorf("failed to update record: %v", err)
|
||||
}
|
||||
} else if change.Action == cloudFlareDelete {
|
||||
recordID := p.getRecordID(records, change.ResourceRecord)
|
||||
|
@ -31,7 +31,7 @@ import (
|
||||
"time"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
etcdcv3 "go.etcd.io/etcd/clientv3"
|
||||
etcdcv3 "go.etcd.io/etcd/client/v3"
|
||||
|
||||
"sigs.k8s.io/external-dns/endpoint"
|
||||
"sigs.k8s.io/external-dns/plan"
|
||||
|
@ -35,6 +35,21 @@ import (
|
||||
|
||||
type mockDigitalOceanClient struct{}
|
||||
|
||||
func (m *mockDigitalOceanClient) RecordsByName(context.Context, string, string, *godo.ListOptions) ([]godo.DomainRecord, *godo.Response, error) {
|
||||
// not used, here only to correctly implement the interface
|
||||
return nil, nil, nil
|
||||
}
|
||||
|
||||
func (m *mockDigitalOceanClient) RecordsByTypeAndName(context.Context, string, string, string, *godo.ListOptions) ([]godo.DomainRecord, *godo.Response, error) {
|
||||
// not used, here only to correctly implement the interface
|
||||
return nil, nil, nil
|
||||
}
|
||||
|
||||
func (m *mockDigitalOceanClient) RecordsByType(context.Context, string, string, *godo.ListOptions) ([]godo.DomainRecord, *godo.Response, error) {
|
||||
// not used, here only to correctly implement the interface
|
||||
return nil, nil, nil
|
||||
}
|
||||
|
||||
func (m *mockDigitalOceanClient) List(ctx context.Context, opt *godo.ListOptions) ([]godo.Domain, *godo.Response, error) {
|
||||
if opt == nil || opt.Page == 0 {
|
||||
return []godo.Domain{{Name: "foo.com"}, {Name: "example.com"}}, &godo.Response{
|
||||
@ -112,6 +127,21 @@ func (m *mockDigitalOceanClient) Records(ctx context.Context, domain string, opt
|
||||
|
||||
type mockDigitalOceanRecordsFail struct{}
|
||||
|
||||
func (m *mockDigitalOceanRecordsFail) RecordsByName(context.Context, string, string, *godo.ListOptions) ([]godo.DomainRecord, *godo.Response, error) {
|
||||
// not used, here only to correctly implement the interface
|
||||
return nil, nil, nil
|
||||
}
|
||||
|
||||
func (m *mockDigitalOceanRecordsFail) RecordsByTypeAndName(context.Context, string, string, string, *godo.ListOptions) ([]godo.DomainRecord, *godo.Response, error) {
|
||||
// not used, here only to correctly implement the interface
|
||||
return nil, nil, nil
|
||||
}
|
||||
|
||||
func (m *mockDigitalOceanRecordsFail) RecordsByType(context.Context, string, string, *godo.ListOptions) ([]godo.DomainRecord, *godo.Response, error) {
|
||||
// not used, here only to correctly implement the interface
|
||||
return nil, nil, nil
|
||||
}
|
||||
|
||||
func (m *mockDigitalOceanRecordsFail) List(context.Context, *godo.ListOptions) ([]godo.Domain, *godo.Response, error) {
|
||||
return []godo.Domain{{Name: "foo.com"}, {Name: "bar.com"}}, nil, nil
|
||||
}
|
||||
|
@ -110,6 +110,8 @@ type GoogleProvider struct {
|
||||
batchChangeInterval time.Duration
|
||||
// only consider hosted zones managing domains ending in this suffix
|
||||
domainFilter endpoint.DomainFilter
|
||||
// filter for zones based on visibility
|
||||
zoneTypeFilter provider.ZoneTypeFilter
|
||||
// only consider hosted zones ending with this zone id
|
||||
zoneIDFilter provider.ZoneIDFilter
|
||||
// A client for managing resource record sets
|
||||
@ -123,7 +125,7 @@ type GoogleProvider struct {
|
||||
}
|
||||
|
||||
// NewGoogleProvider initializes a new Google CloudDNS based Provider.
|
||||
func NewGoogleProvider(ctx context.Context, project string, domainFilter endpoint.DomainFilter, zoneIDFilter provider.ZoneIDFilter, batchChangeSize int, batchChangeInterval time.Duration, dryRun bool) (*GoogleProvider, error) {
|
||||
func NewGoogleProvider(ctx context.Context, project string, domainFilter endpoint.DomainFilter, zoneIDFilter provider.ZoneIDFilter, batchChangeSize int, batchChangeInterval time.Duration, zoneVisibility string, dryRun bool) (*GoogleProvider, error) {
|
||||
gcloud, err := google.DefaultClient(ctx, dns.NdevClouddnsReadwriteScope)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -143,11 +145,14 @@ func NewGoogleProvider(ctx context.Context, project string, domainFilter endpoin
|
||||
|
||||
if project == "" {
|
||||
mProject, mErr := metadata.ProjectID()
|
||||
if mErr == nil {
|
||||
if mErr != nil {
|
||||
return nil, fmt.Errorf("failed to auto-detect the project id: %w", mErr)
|
||||
}
|
||||
log.Infof("Google project auto-detected: %s", mProject)
|
||||
project = mProject
|
||||
}
|
||||
}
|
||||
|
||||
zoneTypeFilter := provider.NewZoneTypeFilter(zoneVisibility)
|
||||
|
||||
provider := &GoogleProvider{
|
||||
project: project,
|
||||
@ -155,6 +160,7 @@ func NewGoogleProvider(ctx context.Context, project string, domainFilter endpoin
|
||||
batchChangeSize: batchChangeSize,
|
||||
batchChangeInterval: batchChangeInterval,
|
||||
domainFilter: domainFilter,
|
||||
zoneTypeFilter: zoneTypeFilter,
|
||||
zoneIDFilter: zoneIDFilter,
|
||||
resourceRecordSetsClient: resourceRecordSetsService{dnsClient.ResourceRecordSets},
|
||||
managedZonesClient: managedZonesService{dnsClient.ManagedZones},
|
||||
@ -171,11 +177,11 @@ func (p *GoogleProvider) Zones(ctx context.Context) (map[string]*dns.ManagedZone
|
||||
|
||||
f := func(resp *dns.ManagedZonesListResponse) error {
|
||||
for _, zone := range resp.ManagedZones {
|
||||
if p.domainFilter.Match(zone.DnsName) && (p.zoneIDFilter.Match(fmt.Sprintf("%v", zone.Id)) || p.zoneIDFilter.Match(fmt.Sprintf("%v", zone.Name))) {
|
||||
if p.domainFilter.Match(zone.DnsName) && p.zoneTypeFilter.Match(zone.Visibility) && (p.zoneIDFilter.Match(fmt.Sprintf("%v", zone.Id)) || p.zoneIDFilter.Match(fmt.Sprintf("%v", zone.Name))) {
|
||||
zones[zone.Name] = zone
|
||||
log.Debugf("Matched %s (zone: %s)", zone.DnsName, zone.Name)
|
||||
log.Debugf("Matched %s (zone: %s) (visibility: %s)", zone.DnsName, zone.Name, zone.Visibility)
|
||||
} else {
|
||||
log.Debugf("Filtered %s (zone: %s)", zone.DnsName, zone.Name)
|
||||
log.Debugf("Filtered %s (zone: %s) (visibility: %s)", zone.DnsName, zone.Name, zone.Visibility)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -194,24 +194,46 @@ func hasTrailingDot(target string) bool {
|
||||
}
|
||||
|
||||
func TestGoogleZonesIDFilter(t *testing.T) {
|
||||
provider := newGoogleProviderZoneOverlap(t, endpoint.NewDomainFilter([]string{"cluster.local."}), provider.NewZoneIDFilter([]string{"10002"}), false, []*endpoint.Endpoint{})
|
||||
provider := newGoogleProviderZoneOverlap(t, endpoint.NewDomainFilter([]string{"cluster.local."}), provider.NewZoneIDFilter([]string{"10002"}), provider.NewZoneTypeFilter(""), false, []*endpoint.Endpoint{})
|
||||
|
||||
zones, err := provider.Zones(context.Background())
|
||||
require.NoError(t, err)
|
||||
|
||||
validateZones(t, zones, map[string]*dns.ManagedZone{
|
||||
"internal-2": {Name: "internal-2", DnsName: "cluster.local.", Id: 10002},
|
||||
"internal-2": {Name: "internal-2", DnsName: "cluster.local.", Id: 10002, Visibility: "private"},
|
||||
})
|
||||
}
|
||||
|
||||
func TestGoogleZonesNameFilter(t *testing.T) {
|
||||
provider := newGoogleProviderZoneOverlap(t, endpoint.NewDomainFilter([]string{"cluster.local."}), provider.NewZoneIDFilter([]string{"internal-2"}), false, []*endpoint.Endpoint{})
|
||||
provider := newGoogleProviderZoneOverlap(t, endpoint.NewDomainFilter([]string{"cluster.local."}), provider.NewZoneIDFilter([]string{"internal-2"}), provider.NewZoneTypeFilter(""), false, []*endpoint.Endpoint{})
|
||||
|
||||
zones, err := provider.Zones(context.Background())
|
||||
require.NoError(t, err)
|
||||
|
||||
validateZones(t, zones, map[string]*dns.ManagedZone{
|
||||
"internal-2": {Name: "internal-2", DnsName: "cluster.local.", Id: 10002},
|
||||
"internal-2": {Name: "internal-2", DnsName: "cluster.local.", Id: 10002, Visibility: "private"},
|
||||
})
|
||||
}
|
||||
|
||||
func TestGoogleZonesVisibilityFilterPublic(t *testing.T) {
|
||||
provider := newGoogleProviderZoneOverlap(t, endpoint.NewDomainFilter([]string{"cluster.local."}), provider.NewZoneIDFilter([]string{"split-horizon-1"}), provider.NewZoneTypeFilter("public"), false, []*endpoint.Endpoint{})
|
||||
|
||||
zones, err := provider.Zones(context.Background())
|
||||
require.NoError(t, err)
|
||||
|
||||
validateZones(t, zones, map[string]*dns.ManagedZone{
|
||||
"split-horizon-1": {Name: "split-horizon-1", DnsName: "cluster.local.", Id: 10001, Visibility: "public"},
|
||||
})
|
||||
}
|
||||
|
||||
func TestGoogleZonesVisibilityFilterPrivate(t *testing.T) {
|
||||
provider := newGoogleProviderZoneOverlap(t, endpoint.NewDomainFilter([]string{"cluster.local."}), provider.NewZoneIDFilter([]string{"split-horizon-1"}), provider.NewZoneTypeFilter("private"), false, []*endpoint.Endpoint{})
|
||||
|
||||
zones, err := provider.Zones(context.Background())
|
||||
require.NoError(t, err)
|
||||
|
||||
validateZones(t, zones, map[string]*dns.ManagedZone{
|
||||
"split-horizon-1": {Name: "split-horizon-1", DnsName: "cluster.local.", Id: 10001, Visibility: "public"},
|
||||
})
|
||||
}
|
||||
|
||||
@ -650,6 +672,7 @@ func validateZones(t *testing.T, zones map[string]*dns.ManagedZone, expected map
|
||||
func validateZone(t *testing.T, zone *dns.ManagedZone, expected *dns.ManagedZone) {
|
||||
assert.Equal(t, expected.Name, zone.Name)
|
||||
assert.Equal(t, expected.DnsName, zone.DnsName)
|
||||
assert.Equal(t, expected.Visibility, zone.Visibility)
|
||||
}
|
||||
|
||||
func validateChange(t *testing.T, change *dns.Change, expected *dns.Change) {
|
||||
@ -672,12 +695,13 @@ func validateChangeRecord(t *testing.T, record *dns.ResourceRecordSet, expected
|
||||
assert.Equal(t, expected.Type, record.Type)
|
||||
}
|
||||
|
||||
func newGoogleProviderZoneOverlap(t *testing.T, domainFilter endpoint.DomainFilter, zoneIDFilter provider.ZoneIDFilter, dryRun bool, records []*endpoint.Endpoint) *GoogleProvider {
|
||||
func newGoogleProviderZoneOverlap(t *testing.T, domainFilter endpoint.DomainFilter, zoneIDFilter provider.ZoneIDFilter, zoneTypeFilter provider.ZoneTypeFilter, dryRun bool, records []*endpoint.Endpoint) *GoogleProvider {
|
||||
provider := &GoogleProvider{
|
||||
project: "zalando-external-dns-test",
|
||||
dryRun: false,
|
||||
domainFilter: domainFilter,
|
||||
zoneIDFilter: zoneIDFilter,
|
||||
zoneTypeFilter: zoneTypeFilter,
|
||||
resourceRecordSetsClient: &mockResourceRecordSetsClient{},
|
||||
managedZonesClient: &mockManagedZonesClient{},
|
||||
changesClient: &mockChangesClient{},
|
||||
@ -687,18 +711,35 @@ func newGoogleProviderZoneOverlap(t *testing.T, domainFilter endpoint.DomainFilt
|
||||
Name: "internal-1",
|
||||
DnsName: "cluster.local.",
|
||||
Id: 10001,
|
||||
Visibility: "private",
|
||||
})
|
||||
|
||||
createZone(t, provider, &dns.ManagedZone{
|
||||
Name: "internal-2",
|
||||
DnsName: "cluster.local.",
|
||||
Id: 10002,
|
||||
Visibility: "private",
|
||||
})
|
||||
|
||||
createZone(t, provider, &dns.ManagedZone{
|
||||
Name: "internal-3",
|
||||
DnsName: "cluster.local.",
|
||||
Id: 10003,
|
||||
Visibility: "private",
|
||||
})
|
||||
|
||||
createZone(t, provider, &dns.ManagedZone{
|
||||
Name: "split-horizon-1",
|
||||
DnsName: "cluster.local.",
|
||||
Id: 10004,
|
||||
Visibility: "public",
|
||||
})
|
||||
|
||||
createZone(t, provider, &dns.ManagedZone{
|
||||
Name: "split-horizon-1",
|
||||
DnsName: "cluster.local.",
|
||||
Id: 10004,
|
||||
Visibility: "private",
|
||||
})
|
||||
|
||||
provider.dryRun = dryRun
|
||||
|
@ -19,12 +19,14 @@ package infoblox
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
transform "github.com/StackExchange/dnscontrol/pkg/transform"
|
||||
ibclient "github.com/infobloxopen/infoblox-go-client"
|
||||
"github.com/sirupsen/logrus"
|
||||
|
||||
@ -33,6 +35,11 @@ import (
|
||||
"sigs.k8s.io/external-dns/provider"
|
||||
)
|
||||
|
||||
const (
|
||||
// provider specific key to track if PTR record was already created or not for A records
|
||||
providerSpecificInfobloxPtrRecord = "infoblox-ptr-record-exists"
|
||||
)
|
||||
|
||||
// InfobloxConfig clarifies the method signature
|
||||
type InfobloxConfig struct {
|
||||
DomainFilter endpoint.DomainFilter
|
||||
@ -47,6 +54,7 @@ type InfobloxConfig struct {
|
||||
View string
|
||||
MaxResults int
|
||||
FQDNRexEx string
|
||||
CreatePTR bool
|
||||
}
|
||||
|
||||
// InfobloxProvider implements the DNS provider for Infoblox.
|
||||
@ -58,6 +66,7 @@ type InfobloxProvider struct {
|
||||
view string
|
||||
dryRun bool
|
||||
fqdnRegEx string
|
||||
createPTR bool
|
||||
}
|
||||
|
||||
type infobloxRecordSet struct {
|
||||
@ -143,6 +152,7 @@ func NewInfobloxProvider(infobloxConfig InfobloxConfig) (*InfobloxProvider, erro
|
||||
dryRun: infobloxConfig.DryRun,
|
||||
view: infobloxConfig.View,
|
||||
fqdnRegEx: infobloxConfig.FQDNRexEx,
|
||||
createPTR: infobloxConfig.CreatePTR,
|
||||
}
|
||||
|
||||
return provider, nil
|
||||
@ -170,6 +180,9 @@ func (p *InfobloxProvider) Records(ctx context.Context) (endpoints []*endpoint.E
|
||||
}
|
||||
for _, res := range resA {
|
||||
newEndpoint := endpoint.NewEndpoint(res.Name, endpoint.RecordTypeA, res.Ipv4Addr)
|
||||
if p.createPTR {
|
||||
newEndpoint.WithProviderSpecific(providerSpecificInfobloxPtrRecord, "false")
|
||||
}
|
||||
// Check if endpoint already exists and add to existing endpoint if it does
|
||||
foundExisting := false
|
||||
for _, ep := range endpoints {
|
||||
@ -203,7 +216,13 @@ func (p *InfobloxProvider) Records(ctx context.Context) (endpoints []*endpoint.E
|
||||
}
|
||||
for _, res := range resH {
|
||||
for _, ip := range res.Ipv4Addrs {
|
||||
endpoints = append(endpoints, endpoint.NewEndpoint(res.Name, endpoint.RecordTypeA, ip.Ipv4Addr))
|
||||
// host record is an abstraction in infoblox that combines A and PTR records
|
||||
// for any host record we already should have a PTR record in infoblox, so mark it as created
|
||||
newEndpoint := endpoint.NewEndpoint(res.Name, endpoint.RecordTypeA, ip.Ipv4Addr)
|
||||
if p.createPTR {
|
||||
newEndpoint.WithProviderSpecific(providerSpecificInfobloxPtrRecord, "true")
|
||||
}
|
||||
endpoints = append(endpoints, newEndpoint)
|
||||
}
|
||||
}
|
||||
|
||||
@ -222,6 +241,29 @@ func (p *InfobloxProvider) Records(ctx context.Context) (endpoints []*endpoint.E
|
||||
endpoints = append(endpoints, endpoint.NewEndpoint(res.Name, endpoint.RecordTypeCNAME, res.Canonical))
|
||||
}
|
||||
|
||||
if p.createPTR {
|
||||
// infoblox doesn't accept reverse zone's fqdn, and instead expects .in-addr.arpa zone
|
||||
// so convert our zone fqdn (if it is a correct cidr block) into in-addr.arpa address and pass that into infoblox
|
||||
// example: 10.196.38.0/24 becomes 38.196.10.in-addr.arpa
|
||||
arpaZone, err := transform.ReverseDomainName(zone.Fqdn)
|
||||
if err == nil {
|
||||
var resP []ibclient.RecordPTR
|
||||
objP := ibclient.NewRecordPTR(
|
||||
ibclient.RecordPTR{
|
||||
Zone: arpaZone,
|
||||
View: p.view,
|
||||
},
|
||||
)
|
||||
err = p.client.GetObject(objP, "", &resP)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not fetch PTR records from zone '%s': %s", zone.Fqdn, err)
|
||||
}
|
||||
for _, res := range resP {
|
||||
endpoints = append(endpoints, endpoint.NewEndpoint(res.PtrdName, endpoint.RecordTypePTR, res.Ipv4Addr))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var resT []ibclient.RecordTXT
|
||||
objT := ibclient.NewRecordTXT(
|
||||
ibclient.RecordTXT{
|
||||
@ -242,10 +284,66 @@ func (p *InfobloxProvider) Records(ctx context.Context) (endpoints []*endpoint.E
|
||||
endpoints = append(endpoints, endpoint.NewEndpoint(res.Name, endpoint.RecordTypeTXT, res.Text))
|
||||
}
|
||||
}
|
||||
|
||||
// update A records that have PTR record created for them already
|
||||
if p.createPTR {
|
||||
// save all ptr records into map for a quick look up
|
||||
ptrRecordsMap := make(map[string]bool)
|
||||
for _, ptrRecord := range endpoints {
|
||||
if ptrRecord.RecordType != endpoint.RecordTypePTR {
|
||||
continue
|
||||
}
|
||||
ptrRecordsMap[ptrRecord.DNSName] = true
|
||||
}
|
||||
|
||||
for i := range endpoints {
|
||||
if endpoints[i].RecordType != endpoint.RecordTypeA {
|
||||
continue
|
||||
}
|
||||
// if PTR record already exists for A record, then mark it as such
|
||||
if ptrRecordsMap[endpoints[i].DNSName] {
|
||||
found := false
|
||||
for j := range endpoints[i].ProviderSpecific {
|
||||
if endpoints[i].ProviderSpecific[j].Name == providerSpecificInfobloxPtrRecord {
|
||||
endpoints[i].ProviderSpecific[j].Value = "true"
|
||||
found = true
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
endpoints[i].WithProviderSpecific(providerSpecificInfobloxPtrRecord, "true")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
logrus.Debugf("fetched %d records from infoblox", len(endpoints))
|
||||
return endpoints, nil
|
||||
}
|
||||
|
||||
func (p *InfobloxProvider) AdjustEndpoints(endpoints []*endpoint.Endpoint) []*endpoint.Endpoint {
|
||||
if !p.createPTR {
|
||||
return endpoints
|
||||
}
|
||||
|
||||
// for all A records, we want to create PTR records
|
||||
// so add provider specific property to track if the record was created or not
|
||||
for i := range endpoints {
|
||||
if endpoints[i].RecordType == endpoint.RecordTypeA {
|
||||
found := false
|
||||
for j := range endpoints[i].ProviderSpecific {
|
||||
if endpoints[i].ProviderSpecific[j].Name == providerSpecificInfobloxPtrRecord {
|
||||
endpoints[i].ProviderSpecific[j].Value = "true"
|
||||
found = true
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
endpoints[i].WithProviderSpecific(providerSpecificInfobloxPtrRecord, "true")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return endpoints
|
||||
}
|
||||
|
||||
// ApplyChanges applies the given changes.
|
||||
func (p *InfobloxProvider) ApplyChanges(ctx context.Context, changes *plan.Changes) error {
|
||||
zones, err := p.zones()
|
||||
@ -301,6 +399,17 @@ func (p *InfobloxProvider) mapChanges(zones []ibclient.ZoneAuth, changes *plan.C
|
||||
}
|
||||
// Ensure the record type is suitable
|
||||
changeMap[zone.Fqdn] = append(changeMap[zone.Fqdn], change)
|
||||
|
||||
if p.createPTR && change.RecordType == endpoint.RecordTypeA {
|
||||
reverseZone := p.findReverseZone(zones, change.Targets[0])
|
||||
if reverseZone == nil {
|
||||
logrus.Debugf("Ignoring changes to '%s' because a suitable Infoblox DNS reverse zone was not found.", change.Targets[0])
|
||||
return
|
||||
}
|
||||
changecopy := *change
|
||||
changecopy.RecordType = endpoint.RecordTypePTR
|
||||
changeMap[reverseZone.Fqdn] = append(changeMap[reverseZone.Fqdn], &changecopy)
|
||||
}
|
||||
}
|
||||
|
||||
for _, change := range changes.Delete {
|
||||
@ -338,6 +447,28 @@ func (p *InfobloxProvider) findZone(zones []ibclient.ZoneAuth, name string) *ibc
|
||||
return result
|
||||
}
|
||||
|
||||
func (p *InfobloxProvider) findReverseZone(zones []ibclient.ZoneAuth, name string) *ibclient.ZoneAuth {
|
||||
ip := net.ParseIP(name)
|
||||
networks := map[int]*ibclient.ZoneAuth{}
|
||||
maxMask := 0
|
||||
|
||||
for i, zone := range zones {
|
||||
_, net, err := net.ParseCIDR(zone.Fqdn)
|
||||
if err != nil {
|
||||
logrus.WithError(err).Debugf("fqdn %s is not a CIDR", zone.Fqdn)
|
||||
} else {
|
||||
if net.Contains(ip) {
|
||||
_, mask := net.Mask.Size()
|
||||
networks[mask] = &zones[i]
|
||||
if mask > maxMask {
|
||||
maxMask = mask
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return networks[maxMask]
|
||||
}
|
||||

func (p *InfobloxProvider) recordSet(ep *endpoint.Endpoint, getObject bool, targetIndex int) (recordSet infobloxRecordSet, err error) {
	switch ep.RecordType {
	case endpoint.RecordTypeA:
@ -359,6 +490,25 @@ func (p *InfobloxProvider) recordSet(ep *endpoint.Endpoint, getObject bool, targ
			obj: obj,
			res: &res,
		}
	case endpoint.RecordTypePTR:
		var res []ibclient.RecordPTR
		obj := ibclient.NewRecordPTR(
			ibclient.RecordPTR{
				PtrdName: ep.DNSName,
				Ipv4Addr: ep.Targets[targetIndex],
				View:     p.view,
			},
		)
		if getObject {
			err = p.client.GetObject(obj, "", &res)
			if err != nil {
				return
			}
		}
		recordSet = infobloxRecordSet{
			obj: obj,
			res: &res,
		}
	case endpoint.RecordTypeCNAME:
		var res []ibclient.RecordCNAME
		obj := ibclient.NewRecordCNAME(
@ -483,6 +633,10 @@ func (p *InfobloxProvider) deleteRecords(deleted infobloxChangeMap) {
		for _, record := range *recordSet.res.(*[]ibclient.RecordA) {
			_, err = p.client.DeleteObject(record.Ref)
		}
	case endpoint.RecordTypePTR:
		for _, record := range *recordSet.res.(*[]ibclient.RecordPTR) {
			_, err = p.client.DeleteObject(record.Ref)
		}
	case endpoint.RecordTypeCNAME:
		for _, record := range *recordSet.res.(*[]ibclient.RecordCNAME) {
			_, err = p.client.DeleteObject(record.Ref)

@ -25,6 +25,7 @@ import (
	"testing"

	ibclient "github.com/infobloxopen/infoblox-go-client"
	"github.com/miekg/dns"
	"github.com/stretchr/testify/assert"

	"sigs.k8s.io/external-dns/endpoint"
@ -89,6 +90,21 @@ func (client *mockIBConnector) CreateObject(obj ibclient.IBObject) (ref string,
		)
		obj.(*ibclient.RecordTXT).Ref = ref
		ref = fmt.Sprintf("%s/%s:%s/default", obj.ObjectType(), base64.StdEncoding.EncodeToString([]byte(obj.(*ibclient.RecordTXT).Name)), obj.(*ibclient.RecordTXT).Name)
	case "record:ptr":
		client.createdEndpoints = append(
			client.createdEndpoints,
			endpoint.NewEndpoint(
				obj.(*ibclient.RecordPTR).PtrdName,
				endpoint.RecordTypePTR,
				obj.(*ibclient.RecordPTR).Ipv4Addr,
			),
		)
		obj.(*ibclient.RecordPTR).Ref = ref
		reverseAddr, err := dns.ReverseAddr(obj.(*ibclient.RecordPTR).Ipv4Addr)
		if err != nil {
			return ref, fmt.Errorf("unable to create reverse addr from %s", obj.(*ibclient.RecordPTR).Ipv4Addr)
		}
		ref = fmt.Sprintf("%s/%s:%s/default", obj.ObjectType(), base64.StdEncoding.EncodeToString([]byte(obj.(*ibclient.RecordPTR).PtrdName)), reverseAddr)
	}
	*client.mockInfobloxObjects = append(
		*client.mockInfobloxObjects,
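The PTR branch of the mock leans on dns.ReverseAddr from miekg/dns to derive the reverse name embedded in the returned reference. A quick stand-alone illustration of what that call produces:

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	// ReverseAddr returns the reverse-DNS name for an IP, with a trailing dot.
	arpa, err := dns.ReverseAddr("10.0.0.1")
	if err != nil {
		panic(err)
	}
	fmt.Println(arpa) // 1.0.0.10.in-addr.arpa.
}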
@ -163,6 +179,22 @@ func (client *mockIBConnector) GetObject(obj ibclient.IBObject, ref string, res
			}
		}
		*res.(*[]ibclient.RecordTXT) = result
	case "record:ptr":
		var result []ibclient.RecordPTR
		for _, object := range *client.mockInfobloxObjects {
			if object.ObjectType() == "record:ptr" {
				if ref != "" &&
					ref != object.(*ibclient.RecordPTR).Ref {
					continue
				}
				if obj.(*ibclient.RecordPTR).PtrdName != "" &&
					obj.(*ibclient.RecordPTR).PtrdName != object.(*ibclient.RecordPTR).PtrdName {
					continue
				}
				result = append(result, *object.(*ibclient.RecordPTR))
			}
		}
		*res.(*[]ibclient.RecordPTR) = result
	case "zone_auth":
		*res.(*[]ibclient.ZoneAuth) = *client.mockInfobloxZones
	}
@ -246,6 +278,24 @@ func (client *mockIBConnector) DeleteObject(ref string) (refRes string, err erro
				),
			)
		}
	case "record:ptr":
		var records []ibclient.RecordPTR
		obj := ibclient.NewRecordPTR(
			ibclient.RecordPTR{
				Name: result[2],
			},
		)
		client.GetObject(obj, ref, &records)
		for _, record := range records {
			client.deletedEndpoints = append(
				client.deletedEndpoints,
				endpoint.NewEndpoint(
					record.PtrdName,
					endpoint.RecordTypePTR,
					"",
				),
			)
		}
	}
	return "", nil
}
@ -339,16 +389,25 @@ func createMockInfobloxObject(name, recordType, value string) ibclient.IBObject
			},
		},
	)
	case endpoint.RecordTypePTR:
		return ibclient.NewRecordPTR(
			ibclient.RecordPTR{
				Ref:      ref,
				PtrdName: name,
				Ipv4Addr: value,
			},
		)
	}
	return nil
}

func newInfobloxProvider(domainFilter endpoint.DomainFilter, zoneIDFilter provider.ZoneIDFilter, dryRun bool, client ibclient.IBConnector) *InfobloxProvider {
func newInfobloxProvider(domainFilter endpoint.DomainFilter, zoneIDFilter provider.ZoneIDFilter, dryRun bool, createPTR bool, client ibclient.IBConnector) *InfobloxProvider {
	return &InfobloxProvider{
		client:       client,
		domainFilter: domainFilter,
		zoneIDFilter: zoneIDFilter,
		dryRun:       dryRun,
		createPTR:    createPTR,
	}
}

@ -376,7 +435,7 @@ func TestInfobloxRecords(t *testing.T) {
		},
	}

	provider := newInfobloxProvider(endpoint.NewDomainFilter([]string{"example.com"}), provider.NewZoneIDFilter([]string{""}), true, &client)
	provider := newInfobloxProvider(endpoint.NewDomainFilter([]string{"example.com"}), provider.NewZoneIDFilter([]string{""}), true, false, &client)
	actual, err := provider.Records(context.Background())

	if err != nil {
@ -399,10 +458,66 @@ func TestInfobloxRecords(t *testing.T) {
	validateEndpoints(t, actual, expected)
}

func TestInfobloxAdjustEndpoints(t *testing.T) {
	client := mockIBConnector{
		mockInfobloxZones: &[]ibclient.ZoneAuth{
			createMockInfobloxZone("example.com"),
			createMockInfobloxZone("other.com"),
		},
		mockInfobloxObjects: &[]ibclient.IBObject{
			createMockInfobloxObject("example.com", endpoint.RecordTypeA, "123.123.123.122"),
			createMockInfobloxObject("example.com", endpoint.RecordTypeTXT, "heritage=external-dns,external-dns/owner=default"),
			createMockInfobloxObject("hack.example.com", endpoint.RecordTypeCNAME, "cerberus.infoblox.com"),
			createMockInfobloxObject("host.example.com", "HOST", "125.1.1.1"),
		},
	}

	provider := newInfobloxProvider(endpoint.NewDomainFilter([]string{"example.com"}), provider.NewZoneIDFilter([]string{""}), true, true, &client)
	actual, err := provider.Records(context.Background())
	if err != nil {
		t.Fatal(err)
	}
	provider.AdjustEndpoints(actual)

	expected := []*endpoint.Endpoint{
		endpoint.NewEndpoint("example.com", endpoint.RecordTypeA, "123.123.123.122").WithProviderSpecific(providerSpecificInfobloxPtrRecord, "true"),
		endpoint.NewEndpoint("example.com", endpoint.RecordTypeTXT, "\"heritage=external-dns,external-dns/owner=default\""),
		endpoint.NewEndpoint("hack.example.com", endpoint.RecordTypeCNAME, "cerberus.infoblox.com"),
		endpoint.NewEndpoint("host.example.com", endpoint.RecordTypeA, "125.1.1.1").WithProviderSpecific(providerSpecificInfobloxPtrRecord, "true"),
	}
	validateEndpoints(t, actual, expected)
}

func TestInfobloxRecordsReverse(t *testing.T) {

	client := mockIBConnector{
		mockInfobloxZones: &[]ibclient.ZoneAuth{
			createMockInfobloxZone("10.0.0.0/24"),
			createMockInfobloxZone("10.0.1.0/24"),
		},
		mockInfobloxObjects: &[]ibclient.IBObject{
			createMockInfobloxObject("example.com", endpoint.RecordTypePTR, "10.0.0.1"),
			createMockInfobloxObject("example2.com", endpoint.RecordTypePTR, "10.0.0.2"),
		},
	}

	provider := newInfobloxProvider(endpoint.NewDomainFilter([]string{"10.0.0.0/24"}), provider.NewZoneIDFilter([]string{""}), true, true, &client)
	actual, err := provider.Records(context.Background())

	if err != nil {
		t.Fatal(err)
	}
	expected := []*endpoint.Endpoint{
		endpoint.NewEndpoint("example.com", endpoint.RecordTypePTR, "10.0.0.1"),
		endpoint.NewEndpoint("example2.com", endpoint.RecordTypePTR, "10.0.0.2"),
	}
	validateEndpoints(t, actual, expected)
}

func TestInfobloxApplyChanges(t *testing.T) {
	client := mockIBConnector{}

	testInfobloxApplyChangesInternal(t, false, &client)
	testInfobloxApplyChangesInternal(t, false, false, &client)

	validateEndpoints(t, client.createdEndpoints, []*endpoint.Endpoint{
		endpoint.NewEndpoint("example.com", endpoint.RecordTypeA, "1.2.3.4"),
@ -423,7 +538,39 @@ func TestInfobloxApplyChanges(t *testing.T) {
		endpoint.NewEndpoint("old.example.com", endpoint.RecordTypeA, ""),
		endpoint.NewEndpoint("oldcname.example.com", endpoint.RecordTypeCNAME, ""),
		endpoint.NewEndpoint("deleted.example.com", endpoint.RecordTypeA, ""),
		endpoint.NewEndpoint("deleted.example.com", endpoint.RecordTypeTXT, ""),
		endpoint.NewEndpoint("deletedcname.example.com", endpoint.RecordTypeCNAME, ""),
	})

	validateEndpoints(t, client.updatedEndpoints, []*endpoint.Endpoint{})
}

func TestInfobloxApplyChangesReverse(t *testing.T) {
	client := mockIBConnector{}

	testInfobloxApplyChangesInternal(t, false, true, &client)

	validateEndpoints(t, client.createdEndpoints, []*endpoint.Endpoint{
		endpoint.NewEndpoint("example.com", endpoint.RecordTypeA, "1.2.3.4"),
		endpoint.NewEndpoint("example.com", endpoint.RecordTypePTR, "1.2.3.4"),
		endpoint.NewEndpoint("example.com", endpoint.RecordTypeTXT, "tag"),
		endpoint.NewEndpoint("foo.example.com", endpoint.RecordTypeA, "1.2.3.4"),
		endpoint.NewEndpoint("foo.example.com", endpoint.RecordTypePTR, "1.2.3.4"),
		endpoint.NewEndpoint("foo.example.com", endpoint.RecordTypeTXT, "tag"),
		endpoint.NewEndpoint("bar.example.com", endpoint.RecordTypeCNAME, "other.com"),
		endpoint.NewEndpoint("bar.example.com", endpoint.RecordTypeTXT, "tag"),
		endpoint.NewEndpoint("other.com", endpoint.RecordTypeA, "5.6.7.8"),
		endpoint.NewEndpoint("other.com", endpoint.RecordTypeTXT, "tag"),
		endpoint.NewEndpoint("new.example.com", endpoint.RecordTypeA, "111.222.111.222"),
		endpoint.NewEndpoint("newcname.example.com", endpoint.RecordTypeCNAME, "other.com"),
		endpoint.NewEndpoint("multiple.example.com", endpoint.RecordTypeA, "1.2.3.4,3.4.5.6,8.9.10.11"),
		endpoint.NewEndpoint("multiple.example.com", endpoint.RecordTypeTXT, "tag-multiple-A-records"),
	})

	validateEndpoints(t, client.deletedEndpoints, []*endpoint.Endpoint{
		endpoint.NewEndpoint("old.example.com", endpoint.RecordTypeA, ""),
		endpoint.NewEndpoint("oldcname.example.com", endpoint.RecordTypeCNAME, ""),
		endpoint.NewEndpoint("deleted.example.com", endpoint.RecordTypeA, ""),
		endpoint.NewEndpoint("deleted.example.com", endpoint.RecordTypePTR, ""),
		endpoint.NewEndpoint("deletedcname.example.com", endpoint.RecordTypeCNAME, ""),
	})

@ -435,7 +582,7 @@ func TestInfobloxApplyChangesDryRun(t *testing.T) {
		mockInfobloxObjects: &[]ibclient.IBObject{},
	}

	testInfobloxApplyChangesInternal(t, true, &client)
	testInfobloxApplyChangesInternal(t, true, false, &client)

	validateEndpoints(t, client.createdEndpoints, []*endpoint.Endpoint{})

@ -444,14 +591,16 @@ func TestInfobloxApplyChangesDryRun(t *testing.T) {
	validateEndpoints(t, client.updatedEndpoints, []*endpoint.Endpoint{})
}

func testInfobloxApplyChangesInternal(t *testing.T, dryRun bool, client ibclient.IBConnector) {
func testInfobloxApplyChangesInternal(t *testing.T, dryRun, createPTR bool, client ibclient.IBConnector) {
	client.(*mockIBConnector).mockInfobloxZones = &[]ibclient.ZoneAuth{
		createMockInfobloxZone("example.com"),
		createMockInfobloxZone("other.com"),
		createMockInfobloxZone("1.2.3.0/24"),
	}
	client.(*mockIBConnector).mockInfobloxObjects = &[]ibclient.IBObject{
		createMockInfobloxObject("deleted.example.com", endpoint.RecordTypeA, "121.212.121.212"),
		createMockInfobloxObject("deleted.example.com", endpoint.RecordTypeTXT, "test-deleting-txt"),
		createMockInfobloxObject("deleted.example.com", endpoint.RecordTypePTR, "121.212.121.212"),
		createMockInfobloxObject("deletedcname.example.com", endpoint.RecordTypeCNAME, "other.com"),
		createMockInfobloxObject("old.example.com", endpoint.RecordTypeA, "121.212.121.212"),
		createMockInfobloxObject("oldcname.example.com", endpoint.RecordTypeCNAME, "other.com"),
@ -461,6 +610,7 @@ func testInfobloxApplyChangesInternal(t *testing.T, dryRun bool, client ibclient
		endpoint.NewDomainFilter([]string{""}),
		provider.NewZoneIDFilter([]string{""}),
		dryRun,
		createPTR,
		client,
	)

@ -493,11 +643,14 @@ func testInfobloxApplyChangesInternal(t *testing.T, dryRun bool, client ibclient

	deleteRecords := []*endpoint.Endpoint{
		endpoint.NewEndpoint("deleted.example.com", endpoint.RecordTypeA, "121.212.121.212"),
		endpoint.NewEndpoint("deleted.example.com", endpoint.RecordTypeTXT, "test-deleting-txt"),
		endpoint.NewEndpoint("deletedcname.example.com", endpoint.RecordTypeCNAME, "other.com"),
		endpoint.NewEndpoint("deleted.nope.com", endpoint.RecordTypeA, "222.111.222.111"),
	}

	if createPTR {
		deleteRecords = append(deleteRecords, endpoint.NewEndpoint("deleted.example.com", endpoint.RecordTypePTR, "121.212.121.212"))
	}

	changes := &plan.Changes{
		Create:    createRecords,
		UpdateNew: updateNewRecords,
@ -516,11 +669,12 @@ func TestInfobloxZones(t *testing.T) {
			createMockInfobloxZone("example.com"),
			createMockInfobloxZone("lvl1-1.example.com"),
			createMockInfobloxZone("lvl2-1.lvl1-1.example.com"),
			createMockInfobloxZone("1.2.3.0/24"),
		},
		mockInfobloxObjects: &[]ibclient.IBObject{},
	}

	provider := newInfobloxProvider(endpoint.NewDomainFilter([]string{"example.com"}), provider.NewZoneIDFilter([]string{""}), true, &client)
	provider := newInfobloxProvider(endpoint.NewDomainFilter([]string{"example.com", "1.2.3.0/24"}), provider.NewZoneIDFilter([]string{""}), true, false, &client)
	zones, _ := provider.zones()
	var emptyZoneAuth *ibclient.ZoneAuth
	assert.Equal(t, provider.findZone(zones, "example.com").Fqdn, "example.com")
@ -531,6 +685,26 @@ func TestInfobloxZones(t *testing.T) {
	assert.Equal(t, provider.findZone(zones, "lvl2-1.lvl1-1.example.com").Fqdn, "lvl2-1.lvl1-1.example.com")
	assert.Equal(t, provider.findZone(zones, "lvl2-2.lvl1-1.example.com").Fqdn, "lvl1-1.example.com")
	assert.Equal(t, provider.findZone(zones, "lvl2-2.lvl1-2.example.com").Fqdn, "example.com")
	assert.Equal(t, provider.findZone(zones, "1.2.3.0/24").Fqdn, "1.2.3.0/24")
}

func TestInfobloxReverseZones(t *testing.T) {
	client := mockIBConnector{
		mockInfobloxZones: &[]ibclient.ZoneAuth{
			createMockInfobloxZone("example.com"),
			createMockInfobloxZone("1.2.3.0/24"),
			createMockInfobloxZone("10.0.0.0/8"),
		},
		mockInfobloxObjects: &[]ibclient.IBObject{},
	}

	provider := newInfobloxProvider(endpoint.NewDomainFilter([]string{"example.com", "1.2.3.0/24", "10.0.0.0/8"}), provider.NewZoneIDFilter([]string{""}), true, false, &client)
	zones, _ := provider.zones()
	var emptyZoneAuth *ibclient.ZoneAuth
	assert.Equal(t, provider.findReverseZone(zones, "nomatch-example.com"), emptyZoneAuth)
	assert.Equal(t, provider.findReverseZone(zones, "192.168.0.1"), emptyZoneAuth)
	assert.Equal(t, provider.findReverseZone(zones, "1.2.3.4").Fqdn, "1.2.3.0/24")
	assert.Equal(t, provider.findReverseZone(zones, "10.28.29.30").Fqdn, "10.0.0.0/8")
}

func TestExtendedRequestFDQDRegExBuilder(t *testing.T) {

@ -31,7 +31,7 @@ import (

	"github.com/pkg/errors"
	log "github.com/sirupsen/logrus"
	"go.etcd.io/etcd/clientv3"
	clientv3 "go.etcd.io/etcd/client/v3"

	"sigs.k8s.io/external-dns/endpoint"
	"sigs.k8s.io/external-dns/plan"

@ -24,8 +24,8 @@ import (
	"testing"

	"github.com/stretchr/testify/assert"
	"go.etcd.io/etcd/clientv3"
	"go.etcd.io/etcd/mvcc/mvccpb"
	"go.etcd.io/etcd/api/v3/mvccpb"
	clientv3 "go.etcd.io/etcd/client/v3"

	"sigs.k8s.io/external-dns/endpoint"
	"sigs.k8s.io/external-dns/plan"
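The etcd imports move from the pre-module go.etcd.io/etcd/clientv3 path to the versioned go.etcd.io/etcd/client/v3 module (and go.etcd.io/etcd/api/v3 for generated types such as mvccpb). A minimal sketch of constructing a client against the new path; the endpoint address is a placeholder:

package main

import (
	"context"
	"log"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
)

func main() {
	// Point this at a real etcd member; 127.0.0.1:2379 is only a placeholder.
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	// Prefix reads work exactly as before; only the import path changed.
	if _, err := cli.Get(ctx, "/skydns/", clientv3.WithPrefix()); err != nil {
		log.Fatal(err)
	}
}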
@ -278,7 +278,7 @@ func endpointToScalewayRecords(zoneName string, ep *endpoint.Endpoint) []*domain
	}
	var priority = scalewayDefaultPriority
	if prop, ok := ep.GetProviderSpecificProperty(scalewayPriorityKey); ok {
		prio, err := strconv.ParseUint(prop.Value, 10, 64)
		prio, err := strconv.ParseUint(prop.Value, 10, 32)
		if err != nil {
			log.Errorf("Failed parsing value of %s: %s: %v; using priority of %d", scalewayPriorityKey, prop.Value, err, scalewayDefaultPriority)
		} else {
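Dropping the bitSize argument from 64 to 32 makes strconv.ParseUint reject values that cannot fit a 32-bit priority at parse time, instead of letting them be truncated by a later conversion. A small self-contained sketch of the difference:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	// 4294967296 == 1<<32, one past the uint32 range.
	if _, err := strconv.ParseUint("4294967296", 10, 32); err != nil {
		fmt.Println("bitSize 32 rejects it:", err) // value out of range
	}
	v, _ := strconv.ParseUint("4294967296", 10, 64)
	fmt.Println("bitSize 64 accepts it:", v) // would truncate on uint32(v)
}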
@ -37,24 +37,34 @@ func NewZoneTypeFilter(zoneType string) ZoneTypeFilter {
}

// Match checks whether a zone matches the zone type that's filtered for.
func (f ZoneTypeFilter) Match(zone *route53.HostedZone) bool {
func (f ZoneTypeFilter) Match(rawZoneType interface{}) bool {
	// An empty zone filter includes all hosted zones.
	if f.zoneType == "" {
		return true
	}

	switch zoneType := rawZoneType.(type) {
	// Given a zone type we return true if the given zone matches this type.
	case string:
		switch f.zoneType {
		case zoneTypePublic:
			return zoneType == zoneTypePublic
		case zoneTypePrivate:
			return zoneType == zoneTypePrivate
		}
	case *route53.HostedZone:
		// If the zone has no config we assume it's a public zone since the config's field
		// `PrivateZone` is false by default in go.
		if zone.Config == nil {
		if zoneType.Config == nil {
			return f.zoneType == zoneTypePublic
		}

		// Given a zone type we return true if the given zone matches this type.
		switch f.zoneType {
		case zoneTypePublic:
			return !aws.BoolValue(zone.Config.PrivateZone)
			return !aws.BoolValue(zoneType.Config.PrivateZone)
		case zoneTypePrivate:
			return aws.BoolValue(zone.Config.PrivateZone)
			return aws.BoolValue(zoneType.Config.PrivateZone)
		}
	}

	// We return false on any other path, e.g. unknown zone type filter value.
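Match now takes an interface{} so the same filter serves providers that report a bare "public"/"private" string as well as AWS hosted zones. A short usage sketch, assuming the filter is consumed through the exported provider package the same way the tests below exercise it:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/route53"
	"sigs.k8s.io/external-dns/provider"
)

func main() {
	f := provider.NewZoneTypeFilter("private")

	// Plain string form, e.g. from a provider that only knows the zone kind.
	fmt.Println(f.Match("private")) // true
	fmt.Println(f.Match("public"))  // false

	// AWS hosted-zone form; a nil Config is treated as a public zone.
	private := &route53.HostedZone{Config: &route53.HostedZoneConfig{PrivateZone: aws.Bool(true)}}
	fmt.Println(f.Match(private))               // true
	fmt.Println(f.Match(&route53.HostedZone{})) // false
}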
@ -26,46 +26,38 @@ import (
)

func TestZoneTypeFilterMatch(t *testing.T) {
	publicZone := &route53.HostedZone{Config: &route53.HostedZoneConfig{PrivateZone: aws.Bool(false)}}
	privateZone := &route53.HostedZone{Config: &route53.HostedZoneConfig{PrivateZone: aws.Bool(true)}}
	publicZoneStr := "public"
	privateZoneStr := "private"
	publicZoneAWS := &route53.HostedZone{Config: &route53.HostedZoneConfig{PrivateZone: aws.Bool(false)}}
	privateZoneAWS := &route53.HostedZone{Config: &route53.HostedZoneConfig{PrivateZone: aws.Bool(true)}}

	for _, tc := range []struct {
		zoneTypeFilter string
		zone *route53.HostedZone
		matches bool
		zones []interface{}
	}{
		{
			"", publicZone, true,
			"", true, []interface{}{publicZoneStr, privateZoneStr, &route53.HostedZone{}},
		},
		{
			"", privateZone, true,
			"public", true, []interface{}{publicZoneStr, publicZoneAWS, &route53.HostedZone{}},
		},
		{
			"public", publicZone, true,
			"public", false, []interface{}{privateZoneStr, privateZoneAWS},
		},
		{
			"public", privateZone, false,
			"private", true, []interface{}{privateZoneStr, privateZoneAWS},
		},
		{
			"private", publicZone, false,
			"private", false, []interface{}{publicZoneStr, publicZoneAWS, &route53.HostedZone{}},
		},
		{
			"private", privateZone, true,
		},
		{
			"unknown", publicZone, false,
		},
		{
			"", &route53.HostedZone{}, true,
		},
		{
			"public", &route53.HostedZone{}, true,
		},
		{
			"private", &route53.HostedZone{}, false,
			"unknown", false, []interface{}{publicZoneStr},
		},
	} {
		zoneTypeFilter := NewZoneTypeFilter(tc.zoneTypeFilter)
		assert.Equal(t, tc.matches, zoneTypeFilter.Match(tc.zone))
		for _, zone := range tc.zones {
			assert.Equal(t, tc.matches, zoneTypeFilter.Match(zone))
		}
	}
}
11
scripts/run-trivy.sh
Executable file
@ -0,0 +1,11 @@
#! /bin/bash
set -e

# install trivy
curl -LO https://github.com/aquasecurity/trivy/releases/download/v0.20.2/trivy_0.20.2_Linux-64bit.tar.gz
echo "38a6de48e21a34e0fa0d2cf63439c0afcbbae0e78fb3feada7a84a9cf6e7f60c  trivy_0.20.2_Linux-64bit.tar.gz" | sha256sum -c
tar -xvf trivy_0.20.2_Linux-64bit.tar.gz
chmod +x trivy

# run trivy
./trivy image --exit-code 1 us.gcr.io/k8s-artifacts-prod/external-dns/external-dns:$(git describe --tags --always --dirty)

@ -54,7 +54,7 @@ if external_dns_manages_services:
            k8s_domains.extend(annotations['domainName'].split(','))

if external_dns_manages_ingresses:
    ev1 = client.ExtensionsV1beta1Api()
    ev1 = client.NetworkingV1Api()
    ings = ev1.list_ingress_for_all_namespaces()
    for i in ings.items:
        for r in i.spec.rules:

@ -21,7 +21,6 @@ import (
	"fmt"
	"sort"
	"strings"
	"time"

	ambassador "github.com/datawire/ambassador/pkg/api/getambassador.io/v2"
	"github.com/pkg/errors"
@ -38,7 +37,6 @@ import (
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/tools/cache"
	api "k8s.io/kubernetes/pkg/apis/core"

	"sigs.k8s.io/external-dns/endpoint"
)
@ -87,12 +85,8 @@ func NewAmbassadorHostSource(
	// TODO informer is not explicitly stopped since controller is not passing in its channel.
	informerFactory.Start(wait.NeverStop)

	// wait for the local cache to be populated.
	err = poll(time.Second, 60*time.Second, func() (bool, error) {
		return ambassadorHostInformer.Informer().HasSynced(), nil
	})
	if err != nil {
		return nil, errors.Wrapf(err, "failed to sync cache")
	if err := waitForDynamicCacheSync(context.Background(), informerFactory); err != nil {
		return nil, err
	}

	uc, err := newUnstructuredConverter()
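The hand-rolled poll loop is replaced by a shared waitForDynamicCacheSync helper whose body is not part of this diff. The following is a hypothetical sketch of what such a helper could look like, built on the dynamic informer factory's own WaitForCacheSync:

package source

import (
	"context"
	"fmt"
	"time"

	"k8s.io/client-go/dynamic/dynamicinformer"
)

// Hypothetical sketch: the real helper lives elsewhere in the source package.
func waitForDynamicCacheSync(ctx context.Context, factory dynamicinformer.DynamicSharedInformerFactory) error {
	// Bound the wait so a missing CRD cannot block start-up forever.
	ctx, cancel := context.WithTimeout(ctx, 60*time.Second)
	defer cancel()

	// WaitForCacheSync reports, per resource, whether its cache synced.
	for gvr, synced := range factory.WaitForCacheSync(ctx.Done()) {
		if !synced {
			return fmt.Errorf("failed to sync cache for resource %v", gvr)
		}
	}
	return nil
}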
@ -240,7 +234,7 @@ func parseAmbLoadBalancerService(service string) (namespace, name string, err er
		// If here, we have no separator, so the whole string is the service, and
		// we can assume the default namespace.
		name := service
		namespace := api.NamespaceDefault
		namespace := "default"

		return namespace, name, nil
	} else if len(parts) == 2 {

@ -36,8 +36,8 @@ func (suite *ConnectorSuite) SetupTest() {

}

func startServerToServeTargets(t *testing.T, server string, endpoints []*endpoint.Endpoint) {
	ln, err := net.Listen("tcp", server)
func startServerToServeTargets(t *testing.T, endpoints []*endpoint.Endpoint) net.Listener {
	ln, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		t.Fatal(err)
	}
@ -52,10 +52,13 @@ func startServerToServeTargets(t *testing.T, server string, endpoints []*endpoin
		enc.Encode(endpoints)
		ln.Close()
	}()
	t.Logf("Server listening on %s", server)
	t.Logf("Server listening on %s", ln.Addr().String())
	return ln
}
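Binding the test server to "localhost:0" lets the kernel assign a free ephemeral port, which is what allows these tests to run in parallel without fixed-port collisions. The pattern in isolation:

package main

import (
	"fmt"
	"net"
)

func main() {
	// Port 0 asks the OS for any free port on the loopback interface.
	ln, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		panic(err)
	}
	defer ln.Close()

	// The address actually bound (including the chosen port) is read back
	// from the listener and handed to the client side of the test.
	fmt.Println("listening on", ln.Addr().String())
}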

func TestConnectorSource(t *testing.T) {
	t.Parallel()

	suite.Run(t, new(ConnectorSuite))
	t.Run("Interface", testConnectorSourceImplementsSource)
	t.Run("Endpoints", testConnectorSourceEndpoints)
@ -70,27 +73,23 @@ func testConnectorSourceImplementsSource(t *testing.T) {
func testConnectorSourceEndpoints(t *testing.T) {
	for _, ti := range []struct {
		title string
		serverListenAddress string
		serverAddress string
		server bool
		expected []*endpoint.Endpoint
		expectError bool
	}{
		{
			title: "invalid remote server",
			serverListenAddress: "",
			serverAddress: "localhost:8091",
			server: false,
			expectError: true,
		},
		{
			title: "valid remote server with no endpoints",
			serverListenAddress: "127.0.0.1:8080",
			serverAddress: "127.0.0.1:8080",
			server: true,
			expectError: false,
		},
		{
			title: "valid remote server",
			serverListenAddress: "127.0.0.1:8081",
			serverAddress: "127.0.0.1:8081",
			server: true,
			expected: []*endpoint.Endpoint{
				{DNSName: "abc.example.org",
					Targets: endpoint.Targets{"1.2.3.4"},
@ -102,8 +101,7 @@ func testConnectorSourceEndpoints(t *testing.T) {
		},
		{
			title: "valid remote server with multiple endpoints",
			serverListenAddress: "127.0.0.1:8082",
			serverAddress: "127.0.0.1:8082",
			server: true,
			expected: []*endpoint.Endpoint{
				{DNSName: "abc.example.org",
					Targets: endpoint.Targets{"1.2.3.4"},
@ -119,11 +117,17 @@ func testConnectorSourceEndpoints(t *testing.T) {
			expectError: false,
		},
	} {
		ti := ti
		t.Run(ti.title, func(t *testing.T) {
			if ti.serverListenAddress != "" {
				startServerToServeTargets(t, ti.serverListenAddress, ti.expected)
			t.Parallel()

			addr := "localhost:9999"
			if ti.server {
				ln := startServerToServeTargets(t, ti.expected)
				defer ln.Close()
				addr = ln.Addr().String()
			}
			cs, _ := NewConnectorSource(ti.serverAddress)
			cs, _ := NewConnectorSource(addr)

			endpoints, err := cs.Endpoints(context.Background())
			if ti.expectError {

@ -17,13 +17,10 @@ limitations under the License.
package source

import (
	"bytes"
	"context"
	"fmt"
	"sort"
	"strings"
	"text/template"
	"time"

	"github.com/pkg/errors"
	projectcontour "github.com/projectcontour/contour/apis/projectcontour/v1"
@ -63,18 +60,10 @@ func NewContourHTTPProxySource(
	combineFqdnAnnotation bool,
	ignoreHostnameAnnotation bool,
) (Source, error) {
	var (
		tmpl *template.Template
		err  error
	)
	if fqdnTemplate != "" {
		tmpl, err = template.New("endpoint").Funcs(template.FuncMap{
			"trimPrefix": strings.TrimPrefix,
		}).Parse(fqdnTemplate)
	tmpl, err := parseTemplate(fqdnTemplate)
	if err != nil {
		return nil, err
	}
	}

	// Use shared informer to listen for add/update/delete of HTTPProxys in the specified namespace.
	// Set resync period to 0, to prevent processing when nothing has changed.
@ -93,11 +82,8 @@ func NewContourHTTPProxySource(
	informerFactory.Start(wait.NeverStop)

	// wait for the local cache to be populated.
	err = poll(time.Second, 60*time.Second, func() (bool, error) {
		return httpProxyInformer.Informer().HasSynced(), nil
	})
	if err != nil {
		return nil, errors.Wrap(err, "failed to sync cache")
	if err := waitForDynamicCacheSync(context.Background(), informerFactory); err != nil {
		return nil, err
	}

	uc, err := NewUnstructuredConverter()
@ -197,22 +183,17 @@ func (sc *httpProxySource) Endpoints(ctx context.Context) ([]*endpoint.Endpoint,
}

func (sc *httpProxySource) endpointsFromTemplate(httpProxy *projectcontour.HTTPProxy) ([]*endpoint.Endpoint, error) {
	// Process the whole template string
	var buf bytes.Buffer
	err := sc.fqdnTemplate.Execute(&buf, httpProxy)
	hostnames, err := execTemplate(sc.fqdnTemplate, httpProxy)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to apply template on HTTPProxy %s/%s", httpProxy.Namespace, httpProxy.Name)
		return nil, err
	}

	hostnames := buf.String()

	ttl, err := getTTLFromAnnotations(httpProxy.Annotations)
	if err != nil {
		log.Warn(err)
	}

	targets := getTargetsFromTargetAnnotation(httpProxy.Annotations)

	if len(targets) == 0 {
		for _, lb := range httpProxy.Status.LoadBalancer.Ingress {
			if lb.IP != "" {
@ -227,10 +208,7 @@ func (sc *httpProxySource) endpointsFromTemplate(httpProxy *projectcontour.HTTPP
	providerSpecific, setIdentifier := getProviderSpecificAnnotations(httpProxy.Annotations)

	var endpoints []*endpoint.Endpoint
	// splits the FQDN template and removes the trailing periods
	hostnameList := strings.Split(strings.Replace(hostnames, " ", "", -1), ",")
	for _, hostname := range hostnameList {
		hostname = strings.TrimSuffix(hostname, ".")
	for _, hostname := range hostnames {
		endpoints = append(endpoints, endpointsForHostname(hostname, targets, ttl, providerSpecific, setIdentifier)...)
	}
	return endpoints, nil
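Several sources previously duplicated this template parse-and-execute logic; the diff replaces it with shared parseTemplate/execTemplate helpers that are defined outside this hunk. A plausible reconstruction from the removed inline code follows; the exact signatures are assumptions:

package source

import (
	"bytes"
	"strings"
	"text/template"
)

// parseTemplate compiles an fqdn-template string; an empty string yields a
// nil template, matching the old "only parse when non-empty" behaviour.
func parseTemplate(fqdnTemplate string) (*template.Template, error) {
	if fqdnTemplate == "" {
		return nil, nil
	}
	return template.New("endpoint").Funcs(template.FuncMap{
		"trimPrefix": strings.TrimPrefix,
	}).Parse(fqdnTemplate)
}

// execTemplate renders tmpl against obj and splits the result into hostnames,
// dropping spaces and trailing dots exactly like the inline code it replaces.
// Callers are expected to invoke it only when tmpl is non-nil.
func execTemplate(tmpl *template.Template, obj interface{}) ([]string, error) {
	var buf bytes.Buffer
	if err := tmpl.Execute(&buf, obj); err != nil {
		return nil, err
	}
	var hostnames []string
	for _, name := range strings.Split(strings.ReplaceAll(buf.String(), " ", ""), ",") {
		hostnames = append(hostnames, strings.TrimSuffix(name, "."))
	}
	return hostnames, nil
}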
@ -324,17 +302,5 @@ func (sc *httpProxySource) AddEventHandler(ctx context.Context, handler func())

	// Right now there is no way to remove event handler from informer, see:
	// https://github.com/kubernetes/kubernetes/issues/79610
	sc.httpProxyInformer.Informer().AddEventHandler(
		cache.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				handler()
			},
			UpdateFunc: func(old interface{}, new interface{}) {
				handler()
			},
			DeleteFunc: func(obj interface{}) {
				handler()
			},
		},
	)
	sc.httpProxyInformer.Informer().AddEventHandler(eventHandlerFunc(handler))
}
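The three identical ResourceEventHandlerFuncs callbacks collapse into a single eventHandlerFunc value. Its definition is not shown in this diff; a hypothetical sketch of such an adapter, for the client-go version in use here:

package source

import "k8s.io/client-go/tools/cache"

// eventHandlerFunc is a hypothetical adapter: one callback fired on every
// add, update, or delete, satisfying cache.ResourceEventHandler.
type eventHandlerFunc func()

func (fn eventHandlerFunc) OnAdd(obj interface{})               { fn() }
func (fn eventHandlerFunc) OnUpdate(oldObj, newObj interface{}) { fn() }
func (fn eventHandlerFunc) OnDelete(obj interface{})            { fn() }

// Compile-time check that the adapter satisfies the interface.
var _ cache.ResourceEventHandler = eventHandlerFunc(nil)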
@ -18,14 +18,16 @@ package source

import (
	"context"
	v1 "k8s.io/api/core/v1"
	"testing"

	fakeDynamic "k8s.io/client-go/dynamic/fake"

	"github.com/pkg/errors"
	projectcontour "github.com/projectcontour/contour/apis/projectcontour/v1"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"github.com/stretchr/testify/suite"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
@ -41,6 +43,46 @@ type HTTPProxySuite struct {
	httpProxy *projectcontour.HTTPProxy
}

func newDynamicKubernetesClient() (*fakeDynamic.FakeDynamicClient, *runtime.Scheme) {
	s := runtime.NewScheme()
	_ = projectcontour.AddToScheme(s)
	return fakeDynamic.NewSimpleDynamicClient(s), s
}

type fakeLoadBalancerService struct {
	ips       []string
	hostnames []string
	namespace string
	name      string
}

func (ig fakeLoadBalancerService) Service() *v1.Service {
	svc := &v1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: ig.namespace,
			Name:      ig.name,
		},
		Status: v1.ServiceStatus{
			LoadBalancer: v1.LoadBalancerStatus{
				Ingress: []v1.LoadBalancerIngress{},
			},
		},
	}

	for _, ip := range ig.ips {
		svc.Status.LoadBalancer.Ingress = append(svc.Status.LoadBalancer.Ingress, v1.LoadBalancerIngress{
			IP: ip,
		})
	}
	for _, hostname := range ig.hostnames {
		svc.Status.LoadBalancer.Ingress = append(svc.Status.LoadBalancer.Ingress, v1.LoadBalancerIngress{
			Hostname: hostname,
		})
	}

	return svc
}

func (suite *HTTPProxySuite) SetupTest() {
	fakeDynamicClient, s := newDynamicKubernetesClient()
	var err error
@ -87,12 +129,16 @@ func convertHTTPProxyToUnstructured(hp *projectcontour.HTTPProxy, s *runtime.Sch
}

func TestHTTPProxy(t *testing.T) {
	t.Parallel()

	suite.Run(t, new(HTTPProxySuite))
	t.Run("endpointsFromHTTPProxy", testEndpointsFromHTTPProxy)
	t.Run("Endpoints", testHTTPProxyEndpoints)
}

func TestNewContourHTTPProxySource(t *testing.T) {
	t.Parallel()

	for _, ti := range []struct {
		title string
		annotationFilter string
@ -131,7 +177,10 @@ func TestNewContourHTTPProxySource(t *testing.T) {
			annotationFilter: "contour.heptio.com/ingress.class=contour",
		},
	} {
		ti := ti
		t.Run(ti.title, func(t *testing.T) {
			t.Parallel()

			fakeDynamicClient, _ := newDynamicKubernetesClient()

			_, err := NewContourHTTPProxySource(
@ -152,6 +201,8 @@ func TestNewContourHTTPProxySource(t *testing.T) {
}

func testEndpointsFromHTTPProxy(t *testing.T) {
	t.Parallel()

	for _, ti := range []struct {
		title string
		httpProxy fakeHTTPProxy
@ -233,7 +284,10 @@ func testEndpointsFromHTTPProxy(t *testing.T) {
			expected: []*endpoint.Endpoint{},
		},
	} {
		ti := ti
		t.Run(ti.title, func(t *testing.T) {
			t.Parallel()

			if source, err := newTestHTTPProxySource(); err != nil {
				require.NoError(t, err)
			} else if endpoints, err := source.endpointsFromHTTPProxy(ti.httpProxy.HTTPProxy()); err != nil {
@ -246,6 +300,8 @@ func testEndpointsFromHTTPProxy(t *testing.T) {
}

func testHTTPProxyEndpoints(t *testing.T) {
	t.Parallel()

	namespace := "testing"
	for _, ti := range []struct {
		title string
@ -958,7 +1014,10 @@ func testHTTPProxyEndpoints(t *testing.T) {
			ignoreHostnameAnnotation: true,
		},
	} {
		ti := ti
		t.Run(ti.title, func(t *testing.T) {
			t.Parallel()

			httpProxies := make([]*projectcontour.HTTPProxy, 0)
			for _, item := range ti.httpProxyItems {
				item.loadBalancer = ti.loadBalancer
@ -1071,7 +1130,7 @@ func (ir fakeHTTPProxy) HTTPProxy() *projectcontour.HTTPProxy {
			Annotations: ir.annotations,
		},
		Spec: spec,
		Status: projectcontour.Status{
		Status: projectcontour.HTTPProxyStatus{
			CurrentStatus: status,
			LoadBalancer:  lb,
		},

@ -43,7 +43,7 @@ type crdSource struct {
	crdResource string
	codec runtime.ParameterCodec
	annotationFilter string
	labelFilter string
	labelSelector labels.Selector
}

func addKnownTypes(scheme *runtime.Scheme, groupVersion schema.GroupVersion) error {
@ -103,12 +103,12 @@ func NewCRDClientForAPIVersionKind(client kubernetes.Interface, kubeConfig, apiS
}

// NewCRDSource creates a new crdSource with the given config.
func NewCRDSource(crdClient rest.Interface, namespace, kind string, annotationFilter string, labelFilter string, scheme *runtime.Scheme) (Source, error) {
func NewCRDSource(crdClient rest.Interface, namespace, kind string, annotationFilter string, labelSelector labels.Selector, scheme *runtime.Scheme) (Source, error) {
	return &crdSource{
		crdResource: strings.ToLower(kind) + "s",
		namespace: namespace,
		annotationFilter: annotationFilter,
		labelFilter: labelFilter,
		labelSelector: labelSelector,
		crdClient: crdClient,
		codec: runtime.NewParameterCodec(scheme),
	}, nil
@ -126,11 +126,7 @@ func (cs *crdSource) Endpoints(ctx context.Context) ([]*endpoint.Endpoint, error
		err error
	)

	if cs.labelFilter != "" {
		result, err = cs.List(ctx, &metav1.ListOptions{LabelSelector: cs.labelFilter})
	} else {
		result, err = cs.List(ctx, &metav1.ListOptions{})
	}
	result, err = cs.List(ctx, &metav1.ListOptions{LabelSelector: cs.labelSelector.String()})
	if err != nil {
		return nil, err
	}
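The CRD source now takes a typed labels.Selector instead of a raw filter string, so malformed selectors fail at parse time and labels.Everything() naturally expresses "no filtering" (its String() is empty, collapsing the old if/else). A minimal sketch of building one:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// Parse validates the expression up front instead of deferring
	// the failure to the API server.
	selector, err := labels.Parse("app=external-dns,env in (prod,staging)")
	if err != nil {
		panic(err)
	}
	// The canonical form is what ends up in ListOptions.LabelSelector.
	fmt.Println(selector.String())

	// labels.Everything() matches all objects; its string form is empty.
	fmt.Println(labels.Everything().String() == "") // true
}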
@ -30,6 +30,7 @@ import (
|
||||
"github.com/stretchr/testify/require"
|
||||
"github.com/stretchr/testify/suite"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/runtime/serializer"
|
||||
@ -57,7 +58,7 @@ func objBody(codec runtime.Encoder, obj runtime.Object) io.ReadCloser {
|
||||
return ioutil.NopCloser(bytes.NewReader([]byte(runtime.EncodeOrDie(codec, obj))))
|
||||
}
|
||||
|
||||
func startCRDServerToServeTargets(endpoints []*endpoint.Endpoint, apiVersion, kind, namespace, name string, annotations map[string]string, labels map[string]string, t *testing.T) rest.Interface {
|
||||
func fakeRESTClient(endpoints []*endpoint.Endpoint, apiVersion, kind, namespace, name string, annotations map[string]string, labels map[string]string, t *testing.T) rest.Interface {
|
||||
groupVersion, _ := schema.ParseGroupVersion(apiVersion)
|
||||
scheme := runtime.NewScheme()
|
||||
addKnownTypes(scheme, groupVersion)
|
||||
@ -372,15 +373,22 @@ func testCRDSourceEndpoints(t *testing.T) {
|
||||
expectError: false,
|
||||
},
|
||||
} {
|
||||
ti := ti
|
||||
t.Run(ti.title, func(t *testing.T) {
|
||||
restClient := startCRDServerToServeTargets(ti.endpoints, ti.registeredAPIVersion, ti.registeredKind, ti.registeredNamespace, "test", ti.annotations, ti.labels, t)
|
||||
t.Parallel()
|
||||
|
||||
restClient := fakeRESTClient(ti.endpoints, ti.registeredAPIVersion, ti.registeredKind, ti.registeredNamespace, "test", ti.annotations, ti.labels, t)
|
||||
groupVersion, err := schema.ParseGroupVersion(ti.apiVersion)
|
||||
require.NoError(t, err)
|
||||
|
||||
scheme := runtime.NewScheme()
|
||||
addKnownTypes(scheme, groupVersion)
|
||||
require.NoError(t, addKnownTypes(scheme, groupVersion))
|
||||
|
||||
cs, _ := NewCRDSource(restClient, ti.namespace, ti.kind, ti.annotationFilter, ti.labelFilter, scheme)
|
||||
labelSelector, err := labels.Parse(ti.labelFilter)
|
||||
require.NoError(t, err)
|
||||
|
||||
cs, err := NewCRDSource(restClient, ti.namespace, ti.kind, ti.annotationFilter, labelSelector, scheme)
|
||||
require.NoError(t, err)
|
||||
|
||||
receivedEndpoints, err := cs.Endpoints(context.Background())
|
||||
if ti.expectError {
|
||||
|
@ -26,6 +26,7 @@ import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
fakeDynamic "k8s.io/client-go/dynamic/fake"
|
||||
fakeKube "k8s.io/client-go/kubernetes/fake"
|
||||
"sigs.k8s.io/external-dns/endpoint"
|
||||
@ -211,8 +212,13 @@ var externalProxySource = metav1.PartialObjectMetadata{
|
||||
}
|
||||
|
||||
func TestGlooSource(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
fakeKubernetesClient := fakeKube.NewSimpleClientset()
|
||||
fakeDynamicClient := fakeDynamic.NewSimpleDynamicClient(runtime.NewScheme())
|
||||
fakeDynamicClient := fakeDynamic.NewSimpleDynamicClientWithCustomListKinds(runtime.NewScheme(),
|
||||
map[schema.GroupVersionResource]string{
|
||||
proxyGVR: "ProxyList",
|
||||
})
|
||||
|
||||
source, err := NewGlooSource(fakeDynamicClient, fakeKubernetesClient, defaultGlooNamespace)
|
||||
assert.NoError(t, err)
|
||||
@ -263,7 +269,7 @@ func TestGlooSource(t *testing.T) {
|
||||
endpoints, err := source.Endpoints(context.Background())
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, endpoints, 5)
|
||||
assert.Equal(t, endpoints, []*endpoint.Endpoint{
|
||||
assert.ElementsMatch(t, endpoints, []*endpoint.Endpoint{
|
||||
&endpoint.Endpoint{
|
||||
DNSName: "a.test",
|
||||
Targets: []string{internalProxySvc.Status.LoadBalancer.Ingress[0].IP, internalProxySvc.Status.LoadBalancer.Ingress[1].IP, internalProxySvc.Status.LoadBalancer.Ingress[2].IP},
|
@ -17,20 +17,18 @@ limitations under the License.
|
||||
package source
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
"text/template"
|
||||
"time"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
"k8s.io/api/extensions/v1beta1"
|
||||
networkv1 "k8s.io/api/networking/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
kubeinformers "k8s.io/client-go/informers"
|
||||
extinformers "k8s.io/client-go/informers/extensions/v1beta1"
|
||||
netinformers "k8s.io/client-go/informers/networking/v1"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
|
||||
@ -59,30 +57,23 @@ type ingressSource struct {
|
||||
fqdnTemplate *template.Template
|
||||
combineFQDNAnnotation bool
|
||||
ignoreHostnameAnnotation bool
|
||||
ingressInformer extinformers.IngressInformer
|
||||
ingressInformer netinformers.IngressInformer
|
||||
ignoreIngressTLSSpec bool
|
||||
ignoreIngressRulesSpec bool
|
||||
labelSelector labels.Selector
|
||||
}
|
||||
|
||||
// NewIngressSource creates a new ingressSource with the given config.
|
||||
func NewIngressSource(kubeClient kubernetes.Interface, namespace, annotationFilter string, fqdnTemplate string, combineFqdnAnnotation bool, ignoreHostnameAnnotation bool, ignoreIngressTLSSpec bool, ignoreIngressRulesSpec bool) (Source, error) {
|
||||
var (
|
||||
tmpl *template.Template
|
||||
err error
|
||||
)
|
||||
if fqdnTemplate != "" {
|
||||
tmpl, err = template.New("endpoint").Funcs(template.FuncMap{
|
||||
"trimPrefix": strings.TrimPrefix,
|
||||
}).Parse(fqdnTemplate)
|
||||
func NewIngressSource(kubeClient kubernetes.Interface, namespace, annotationFilter string, fqdnTemplate string, combineFqdnAnnotation bool, ignoreHostnameAnnotation bool, ignoreIngressTLSSpec bool, ignoreIngressRulesSpec bool, labelSelector labels.Selector) (Source, error) {
|
||||
tmpl, err := parseTemplate(fqdnTemplate)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// Use shared informer to listen for add/update/delete of ingresses in the specified namespace.
|
||||
// Set resync period to 0, to prevent processing when nothing has changed.
|
||||
informerFactory := kubeinformers.NewSharedInformerFactoryWithOptions(kubeClient, 0, kubeinformers.WithNamespace(namespace))
|
||||
ingressInformer := informerFactory.Extensions().V1beta1().Ingresses()
|
||||
ingressInformer := informerFactory.Networking().V1().Ingresses()
|
||||
|
||||
// Add default resource event handlers to properly initialize informer.
|
||||
ingressInformer.Informer().AddEventHandler(
|
||||
@ -96,11 +87,8 @@ func NewIngressSource(kubeClient kubernetes.Interface, namespace, annotationFilt
|
||||
informerFactory.Start(wait.NeverStop)
|
||||
|
||||
// wait for the local cache to be populated.
|
||||
err = poll(time.Second, 60*time.Second, func() (bool, error) {
|
||||
return ingressInformer.Informer().HasSynced(), nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to sync cache: %v", err)
|
||||
if err := waitForCacheSync(context.Background(), informerFactory); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
sc := &ingressSource{
|
||||
@ -113,6 +101,7 @@ func NewIngressSource(kubeClient kubernetes.Interface, namespace, annotationFilt
|
||||
ingressInformer: ingressInformer,
|
||||
ignoreIngressTLSSpec: ignoreIngressTLSSpec,
|
||||
ignoreIngressRulesSpec: ignoreIngressRulesSpec,
|
||||
labelSelector: labelSelector,
|
||||
}
|
||||
return sc, nil
|
||||
}
|
||||
@ -120,7 +109,7 @@ func NewIngressSource(kubeClient kubernetes.Interface, namespace, annotationFilt
|
||||
// Endpoints returns endpoint objects for each host-target combination that should be processed.
|
||||
// Retrieves all ingress resources on all namespaces
|
||||
func (sc *ingressSource) Endpoints(ctx context.Context) ([]*endpoint.Endpoint, error) {
|
||||
ingresses, err := sc.ingressInformer.Lister().Ingresses(sc.namespace).List(labels.Everything())
|
||||
ingresses, err := sc.ingressInformer.Lister().Ingresses(sc.namespace).List(sc.labelSelector)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -174,23 +163,18 @@ func (sc *ingressSource) Endpoints(ctx context.Context) ([]*endpoint.Endpoint, e
|
||||
return endpoints, nil
|
||||
}
|
||||
|
||||
func (sc *ingressSource) endpointsFromTemplate(ing *v1beta1.Ingress) ([]*endpoint.Endpoint, error) {
|
||||
// Process the whole template string
|
||||
var buf bytes.Buffer
|
||||
err := sc.fqdnTemplate.Execute(&buf, ing)
|
||||
func (sc *ingressSource) endpointsFromTemplate(ing *networkv1.Ingress) ([]*endpoint.Endpoint, error) {
|
||||
hostnames, err := execTemplate(sc.fqdnTemplate, ing)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to apply template on ingress %s: %v", ing.String(), err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
hostnames := buf.String()
|
||||
|
||||
ttl, err := getTTLFromAnnotations(ing.Annotations)
|
||||
if err != nil {
|
||||
log.Warn(err)
|
||||
}
|
||||
|
||||
targets := getTargetsFromTargetAnnotation(ing.Annotations)
|
||||
|
||||
if len(targets) == 0 {
|
||||
targets = targetsFromIngressStatus(ing.Status)
|
||||
}
|
||||
@ -198,17 +182,14 @@ func (sc *ingressSource) endpointsFromTemplate(ing *v1beta1.Ingress) ([]*endpoin
|
||||
providerSpecific, setIdentifier := getProviderSpecificAnnotations(ing.Annotations)
|
||||
|
||||
var endpoints []*endpoint.Endpoint
|
||||
// splits the FQDN template and removes the trailing periods
|
||||
hostnameList := strings.Split(strings.Replace(hostnames, " ", "", -1), ",")
|
||||
for _, hostname := range hostnameList {
|
||||
hostname = strings.TrimSuffix(hostname, ".")
|
||||
for _, hostname := range hostnames {
|
||||
endpoints = append(endpoints, endpointsForHostname(hostname, targets, ttl, providerSpecific, setIdentifier)...)
|
||||
}
|
||||
return endpoints, nil
|
||||
}
|
||||
|
||||
// filterByAnnotations filters a list of ingresses by a given annotation selector.
|
||||
func (sc *ingressSource) filterByAnnotations(ingresses []*v1beta1.Ingress) ([]*v1beta1.Ingress, error) {
|
||||
func (sc *ingressSource) filterByAnnotations(ingresses []*networkv1.Ingress) ([]*networkv1.Ingress, error) {
|
||||
selector, err := getLabelSelector(sc.annotationFilter)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -219,7 +200,7 @@ func (sc *ingressSource) filterByAnnotations(ingresses []*v1beta1.Ingress) ([]*v
|
||||
return ingresses, nil
|
||||
}
|
||||
|
||||
filteredList := []*v1beta1.Ingress{}
|
||||
filteredList := []*networkv1.Ingress{}
|
||||
|
||||
for _, ingress := range ingresses {
|
||||
// include ingress if its annotations match the selector
|
||||
@ -231,13 +212,13 @@ func (sc *ingressSource) filterByAnnotations(ingresses []*v1beta1.Ingress) ([]*v
|
||||
return filteredList, nil
|
||||
}
|
||||
|
||||
func (sc *ingressSource) setResourceLabel(ingress *v1beta1.Ingress, endpoints []*endpoint.Endpoint) {
|
||||
func (sc *ingressSource) setResourceLabel(ingress *networkv1.Ingress, endpoints []*endpoint.Endpoint) {
|
||||
for _, ep := range endpoints {
|
||||
ep.Labels[endpoint.ResourceLabelKey] = fmt.Sprintf("ingress/%s/%s", ingress.Namespace, ingress.Name)
|
||||
}
|
||||
}
|
||||
|
||||
func (sc *ingressSource) setDualstackLabel(ingress *v1beta1.Ingress, endpoints []*endpoint.Endpoint) {
|
||||
func (sc *ingressSource) setDualstackLabel(ingress *networkv1.Ingress, endpoints []*endpoint.Endpoint) {
|
||||
val, ok := ingress.Annotations[ALBDualstackAnnotationKey]
|
||||
if ok && val == ALBDualstackAnnotationValue {
|
||||
log.Debugf("Adding dualstack label to ingress %s/%s.", ingress.Namespace, ingress.Name)
|
||||
@ -248,7 +229,7 @@ func (sc *ingressSource) setDualstackLabel(ingress *v1beta1.Ingress, endpoints [
|
||||
}
|
||||
|
||||
// endpointsFromIngress extracts the endpoints from ingress object
|
||||
func endpointsFromIngress(ing *v1beta1.Ingress, ignoreHostnameAnnotation bool, ignoreIngressTLSSpec bool, ignoreIngressRulesSpec bool) []*endpoint.Endpoint {
|
||||
func endpointsFromIngress(ing *networkv1.Ingress, ignoreHostnameAnnotation bool, ignoreIngressTLSSpec bool, ignoreIngressRulesSpec bool) []*endpoint.Endpoint {
|
||||
ttl, err := getTTLFromAnnotations(ing.Annotations)
|
||||
if err != nil {
|
||||
log.Warn(err)
|
||||
@ -311,7 +292,7 @@ func endpointsFromIngress(ing *v1beta1.Ingress, ignoreHostnameAnnotation bool, i
|
||||
return endpoints
|
||||
}
|
||||
|
||||
func targetsFromIngressStatus(status v1beta1.IngressStatus) endpoint.Targets {
|
||||
func targetsFromIngressStatus(status networkv1.IngressStatus) endpoint.Targets {
|
||||
var targets endpoint.Targets
|
||||
|
||||
for _, lb := range status.LoadBalancer.Ingress {
|
||||
@ -331,17 +312,5 @@ func (sc *ingressSource) AddEventHandler(ctx context.Context, handler func()) {
|
||||
|
||||
// Right now there is no way to remove event handler from informer, see:
|
||||
// https://github.com/kubernetes/kubernetes/issues/79610
|
||||
sc.ingressInformer.Informer().AddEventHandler(
|
||||
cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: func(obj interface{}) {
|
||||
handler()
|
||||
},
|
||||
UpdateFunc: func(old interface{}, new interface{}) {
|
||||
handler()
|
||||
},
|
||||
DeleteFunc: func(obj interface{}) {
|
||||
handler()
|
||||
},
|
||||
},
|
||||
)
|
||||
sc.ingressInformer.Informer().AddEventHandler(eventHandlerFunc(handler))
|
||||
}
|
||||
|
@ -19,14 +19,14 @@ package source
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"github.com/stretchr/testify/suite"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/api/extensions/v1beta1"
|
||||
networkv1 "k8s.io/api/networking/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
|
||||
"sigs.k8s.io/external-dns/endpoint"
|
||||
@ -38,12 +38,22 @@ var _ Source = &ingressSource{}
|
||||
type IngressSuite struct {
|
||||
suite.Suite
|
||||
sc Source
|
||||
fooWithTargets *v1beta1.Ingress
|
||||
fooWithTargets *networkv1.Ingress
|
||||
}
|
||||
|
||||
func (suite *IngressSuite) SetupTest() {
|
||||
fakeClient := fake.NewSimpleClientset()
|
||||
var err error
|
||||
|
||||
suite.fooWithTargets = (fakeIngress{
|
||||
name: "foo-with-targets",
|
||||
namespace: "default",
|
||||
dnsnames: []string{"foo"},
|
||||
ips: []string{"8.8.8.8"},
|
||||
hostnames: []string{"v1"},
|
||||
annotations: map[string]string{ALBDualstackAnnotationKey: ALBDualstackAnnotationValue},
|
||||
}).Ingress()
|
||||
_, err := fakeClient.NetworkingV1().Ingresses(suite.fooWithTargets.Namespace).Create(context.Background(), suite.fooWithTargets, metav1.CreateOptions{})
|
||||
suite.NoError(err, "should succeed")
|
||||
|
||||
suite.sc, err = NewIngressSource(
|
||||
fakeClient,
|
||||
@ -54,19 +64,9 @@ func (suite *IngressSuite) SetupTest() {
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
labels.Everything(),
|
||||
)
|
||||
suite.NoError(err, "should initialize ingress source")
|
||||
|
||||
suite.fooWithTargets = (fakeIngress{
|
||||
name: "foo-with-targets",
|
||||
namespace: "default",
|
||||
dnsnames: []string{"foo"},
|
||||
ips: []string{"8.8.8.8"},
|
||||
hostnames: []string{"v1"},
|
||||
annotations: map[string]string{ALBDualstackAnnotationKey: ALBDualstackAnnotationValue},
|
||||
}).Ingress()
|
||||
_, err = fakeClient.ExtensionsV1beta1().Ingresses(suite.fooWithTargets.Namespace).Create(context.Background(), suite.fooWithTargets, metav1.CreateOptions{})
|
||||
suite.NoError(err, "should succeed")
|
||||
}
|
||||
|
||||
func (suite *IngressSuite) TestResourceLabelIsSet() {
|
||||
@ -84,6 +84,8 @@ func (suite *IngressSuite) TestDualstackLabelIsSet() {
|
||||
}
|
||||
|
||||
func TestIngress(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
suite.Run(t, new(IngressSuite))
|
||||
t.Run("endpointsFromIngress", testEndpointsFromIngress)
|
||||
t.Run("endpointsFromIngressHostnameSourceAnnotation", testEndpointsFromIngressHostnameSourceAnnotation)
|
||||
@ -91,6 +93,8 @@ func TestIngress(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestNewIngressSource(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
for _, ti := range []struct {
|
||||
title string
|
||||
annotationFilter string
|
||||
@ -129,7 +133,10 @@ func TestNewIngressSource(t *testing.T) {
|
||||
annotationFilter: "kubernetes.io/ingress.class=nginx",
|
||||
},
|
||||
} {
|
||||
ti := ti
|
||||
t.Run(ti.title, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
_, err := NewIngressSource(
|
||||
fake.NewSimpleClientset(),
|
||||
"",
|
||||
@ -139,6 +146,7 @@ func TestNewIngressSource(t *testing.T) {
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
labels.Everything(),
|
||||
)
|
||||
if ti.expectError {
|
||||
assert.Error(t, err)
|
||||
@ -150,6 +158,8 @@ func TestNewIngressSource(t *testing.T) {
|
||||
}
|
||||
|
||||
func testEndpointsFromIngress(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
for _, ti := range []struct {
|
||||
title string
|
||||
ingress fakeIngress
|
||||
@ -336,6 +346,8 @@ func testEndpointsFromIngressHostnameSourceAnnotation(t *testing.T) {
|
||||
}
|
||||
|
||||
func testIngressEndpoints(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
namespace := "testing"
|
||||
for _, ti := range []struct {
|
||||
title string
|
||||
@ -349,6 +361,7 @@ func testIngressEndpoints(t *testing.T) {
|
||||
ignoreHostnameAnnotation bool
|
||||
ignoreIngressTLSSpec bool
|
||||
ignoreIngressRulesSpec bool
|
||||
ingressLabelSelector labels.Selector
|
||||
}{
|
||||
{
|
||||
title: "no ingress",
|
||||
@ -995,6 +1008,9 @@ func testIngressEndpoints(t *testing.T) {
|
||||
DNSName: "example.org",
|
||||
Targets: endpoint.Targets{"ingress-target.com"},
|
||||
RecordType: endpoint.RecordTypeCNAME,
|
||||
ProviderSpecific: endpoint.ProviderSpecific{{
|
||||
Name: "alias", Value: "true",
|
||||
}},
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -1157,14 +1173,57 @@ func testIngressEndpoints(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
ingressLabelSelector: labels.SelectorFromSet(labels.Set{"app": "web-external"}),
|
||||
title: "ingress with matching labels",
|
||||
targetNamespace: "",
|
||||
ingressItems: []fakeIngress{
|
||||
{
|
||||
name: "fake1",
|
||||
namespace: namespace,
|
||||
dnsnames: []string{"example.org"},
|
||||
ips: []string{"8.8.8.8"},
|
||||
labels: map[string]string{"app": "web-external", "name": "reverse-proxy"},
|
||||
},
|
||||
},
|
||||
expected: []*endpoint.Endpoint{
|
||||
{
|
||||
DNSName: "example.org",
|
||||
Targets: endpoint.Targets{"8.8.8.8"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
ingressLabelSelector: labels.SelectorFromSet(labels.Set{"app": "web-external"}),
|
||||
title: "ingress without matching labels",
|
||||
targetNamespace: "",
|
||||
ingressItems: []fakeIngress{
|
||||
{
|
||||
name: "fake1",
|
||||
namespace: namespace,
|
||||
dnsnames: []string{"example.org"},
|
||||
ips: []string{"8.8.8.8"},
|
||||
labels: map[string]string{"app": "web-internal", "name": "reverse-proxy"},
|
||||
},
|
||||
},
|
||||
expected: []*endpoint.Endpoint{},
|
||||
},
|
||||
} {
|
||||
ti := ti
|
||||
t.Run(ti.title, func(t *testing.T) {
|
||||
ingresses := make([]*v1beta1.Ingress, 0)
|
||||
for _, item := range ti.ingressItems {
|
||||
ingresses = append(ingresses, item.Ingress())
|
||||
}
|
||||
t.Parallel()
|
||||
|
||||
fakeClient := fake.NewSimpleClientset()
|
||||
for _, item := range ti.ingressItems {
|
||||
ingress := item.Ingress()
|
||||
_, err := fakeClient.NetworkingV1().Ingresses(ingress.Namespace).Create(context.Background(), ingress, metav1.CreateOptions{})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
if ti.ingressLabelSelector == nil {
|
||||
ti.ingressLabelSelector = labels.Everything()
|
||||
}
|
||||
|
||||
source, _ := NewIngressSource(
|
||||
fakeClient,
|
||||
ti.targetNamespace,
|
||||
@ -1174,36 +1233,8 @@ func testIngressEndpoints(t *testing.T) {
|
||||
ti.ignoreHostnameAnnotation,
|
||||
ti.ignoreIngressTLSSpec,
|
||||
ti.ignoreIngressRulesSpec,
|
||||
ti.ingressLabelSelector,
|
||||
)
|
||||
for _, ingress := range ingresses {
|
||||
_, err := fakeClient.ExtensionsV1beta1().Ingresses(ingress.Namespace).Create(context.Background(), ingress, metav1.CreateOptions{})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
// Wait for the Ingress resources to be visible to the source. We check the
|
||||
// source's informer cache to detect when this occurs. (This violates encapsulation
|
||||
// but is okay as this is a test and we want to ensure the informer's cache updates.)
|
||||
concreteIngressSource := source.(*ingressSource)
|
||||
ingressLister := concreteIngressSource.ingressInformer.Lister()
|
||||
err := poll(250*time.Millisecond, 6*time.Second, func() (bool, error) {
|
||||
allIngressesPresent := true
|
||||
for _, ingress := range ingresses {
|
||||
// Skip ingresses that the source would also skip.
|
||||
if ti.targetNamespace != "" && ti.targetNamespace != ingress.Namespace {
|
||||
continue
|
||||
}
|
||||
|
||||
// Check for the presence of this ingress.
|
||||
_, err := ingressLister.Ingresses(ingress.Namespace).Get(ingress.Name)
|
||||
if err != nil {
|
||||
allIngressesPresent = false
|
||||
break
|
||||
}
|
||||
}
|
||||
return allIngressesPresent, nil
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Informer cache has all of the ingresses. Retrieve and validate their endpoints.
|
||||
res, err := source.Endpoints(context.Background())
|
||||
if ti.expectError {
|
||||
@@ -1225,31 +1256,33 @@ type fakeIngress struct {
	namespace   string
	name        string
	annotations map[string]string
	labels      map[string]string
}

func (ing fakeIngress) Ingress() *v1beta1.Ingress {
	ingress := &v1beta1.Ingress{
func (ing fakeIngress) Ingress() *networkv1.Ingress {
	ingress := &networkv1.Ingress{
		ObjectMeta: metav1.ObjectMeta{
			Namespace:   ing.namespace,
			Name:        ing.name,
			Annotations: ing.annotations,
			Labels:      ing.labels,
		},
		Spec: v1beta1.IngressSpec{
			Rules: []v1beta1.IngressRule{},
		Spec: networkv1.IngressSpec{
			Rules: []networkv1.IngressRule{},
		},
		Status: v1beta1.IngressStatus{
		Status: networkv1.IngressStatus{
			LoadBalancer: v1.LoadBalancerStatus{
				Ingress: []v1.LoadBalancerIngress{},
			},
		},
	}
	for _, dnsname := range ing.dnsnames {
		ingress.Spec.Rules = append(ingress.Spec.Rules, v1beta1.IngressRule{
		ingress.Spec.Rules = append(ingress.Spec.Rules, networkv1.IngressRule{
			Host: dnsname,
		})
	}
	for _, hosts := range ing.tlsdnsnames {
		ingress.Spec.TLS = append(ingress.Spec.TLS, v1beta1.IngressTLS{
		ingress.Spec.TLS = append(ingress.Spec.TLS, networkv1.IngressTLS{
			Hosts: hosts,
		})
	}
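The test hunks above repeatedly pair `ti := ti` with `t.Run(..., func(t *testing.T) { t.Parallel() ... })`. A hypothetical, trimmed-down illustration of why the copy is needed: before Go 1.22, every parallel subtest closure would otherwise capture the same loop variable and observe only its final value.

package source

import "testing"

// TestTableDrivenParallel is a hypothetical example of the capture pattern
// used throughout this commit's table-driven tests.
func TestTableDrivenParallel(t *testing.T) {
	testCases := []struct{ title string }{{"first"}, {"second"}}
	for _, tc := range testCases {
		tc := tc // capture the range variable for the parallel closure
		t.Run(tc.title, func(t *testing.T) {
			t.Parallel()
			_ = tc // each subtest now works with its own copy
		})
	}
}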
@@ -1,358 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package source

import (
	"bytes"
	"context"
	"fmt"
	"sort"
	"strings"
	"text/template"
	"time"

	"github.com/pkg/errors"
	contour "github.com/projectcontour/contour/apis/contour/v1beta1"
	log "github.com/sirupsen/logrus"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/dynamic/dynamicinformer"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"

	"sigs.k8s.io/external-dns/endpoint"
)

// ingressRouteSource is an implementation of Source for ProjectContour IngressRoute objects.
// The IngressRoute implementation uses the spec.virtualHost.fqdn value for the hostname.
// Use targetAnnotationKey to explicitly set Endpoint targets.
type ingressRouteSource struct {
	dynamicKubeClient          dynamic.Interface
	kubeClient                 kubernetes.Interface
	contourLoadBalancerService string
	namespace                  string
	annotationFilter           string
	fqdnTemplate               *template.Template
	combineFQDNAnnotation      bool
	ignoreHostnameAnnotation   bool
	ingressRouteInformer       informers.GenericInformer
	unstructuredConverter      *UnstructuredConverter
}

// NewContourIngressRouteSource creates a new contourIngressRouteSource with the given config.
func NewContourIngressRouteSource(
	dynamicKubeClient dynamic.Interface,
	kubeClient kubernetes.Interface,
	contourLoadBalancerService string,
	namespace string,
	annotationFilter string,
	fqdnTemplate string,
	combineFqdnAnnotation bool,
	ignoreHostnameAnnotation bool,
) (Source, error) {
	var (
		tmpl *template.Template
		err  error
	)
	if fqdnTemplate != "" {
		tmpl, err = template.New("endpoint").Funcs(template.FuncMap{
			"trimPrefix": strings.TrimPrefix,
		}).Parse(fqdnTemplate)
		if err != nil {
			return nil, err
		}
	}

	if _, _, err = parseContourLoadBalancerService(contourLoadBalancerService); err != nil {
		return nil, err
	}

	// Use shared informer to listen for add/update/delete of ingressroutes in the specified namespace.
	// Set resync period to 0, to prevent processing when nothing has changed.
	informerFactory := dynamicinformer.NewFilteredDynamicSharedInformerFactory(dynamicKubeClient, 0, namespace, nil)
	ingressRouteInformer := informerFactory.ForResource(contour.IngressRouteGVR)

	// Add default resource event handlers to properly initialize informer.
	ingressRouteInformer.Informer().AddEventHandler(
		cache.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
			},
		},
	)

	// TODO informer is not explicitly stopped since controller is not passing in its channel.
	informerFactory.Start(wait.NeverStop)

	// wait for the local cache to be populated.
	err = poll(time.Second, 60*time.Second, func() (bool, error) {
		return ingressRouteInformer.Informer().HasSynced(), nil
	})
	if err != nil {
		return nil, fmt.Errorf("failed to sync cache: %v", err)
	}

	uc, err := NewUnstructuredConverter()
	if err != nil {
		return nil, fmt.Errorf("failed to setup Unstructured Converter: %v", err)
	}

	return &ingressRouteSource{
		dynamicKubeClient:          dynamicKubeClient,
		kubeClient:                 kubeClient,
		contourLoadBalancerService: contourLoadBalancerService,
		namespace:                  namespace,
		annotationFilter:           annotationFilter,
		fqdnTemplate:               tmpl,
		combineFQDNAnnotation:      combineFqdnAnnotation,
		ignoreHostnameAnnotation:   ignoreHostnameAnnotation,
		ingressRouteInformer:       ingressRouteInformer,
		unstructuredConverter:      uc,
	}, nil
}

// Endpoints returns endpoint objects for each host-target combination that should be processed.
// Retrieves all ingressroute resources in the source's namespace(s).
func (sc *ingressRouteSource) Endpoints(ctx context.Context) ([]*endpoint.Endpoint, error) {
	irs, err := sc.ingressRouteInformer.Lister().ByNamespace(sc.namespace).List(labels.Everything())
	if err != nil {
		return nil, err
	}

	// Convert to []*contour.IngressRoute
	var ingressRoutes []*contour.IngressRoute
	for _, ir := range irs {
		unstructuredIR, ok := ir.(*unstructured.Unstructured)
		if !ok {
			return nil, errors.New("could not convert object to *unstructured.Unstructured")
		}

		irConverted := &contour.IngressRoute{}
		err := sc.unstructuredConverter.scheme.Convert(unstructuredIR, irConverted, nil)
		if err != nil {
			return nil, err
		}
		ingressRoutes = append(ingressRoutes, irConverted)
	}

	ingressRoutes, err = sc.filterByAnnotations(ingressRoutes)
	if err != nil {
		return nil, err
	}

	endpoints := []*endpoint.Endpoint{}

	for _, ir := range ingressRoutes {
		// Check controller annotation to see if we are responsible.
		controller, ok := ir.Annotations[controllerAnnotationKey]
		if ok && controller != controllerAnnotationValue {
			log.Debugf("Skipping ingressroute %s/%s because controller value does not match, found: %s, required: %s",
				ir.Namespace, ir.Name, controller, controllerAnnotationValue)
			continue
		} else if ir.CurrentStatus != "valid" {
			log.Debugf("Skipping ingressroute %s/%s because it is not valid", ir.Namespace, ir.Name)
			continue
		}

		irEndpoints, err := sc.endpointsFromIngressRoute(ctx, ir)
		if err != nil {
			return nil, err
		}

		// apply template if fqdn is missing on ingressroute
		if (sc.combineFQDNAnnotation || len(irEndpoints) == 0) && sc.fqdnTemplate != nil {
			tmplEndpoints, err := sc.endpointsFromTemplate(ctx, ir)
			if err != nil {
				return nil, err
			}

			if sc.combineFQDNAnnotation {
				irEndpoints = append(irEndpoints, tmplEndpoints...)
			} else {
				irEndpoints = tmplEndpoints
			}
		}

		if len(irEndpoints) == 0 {
			log.Debugf("No endpoints could be generated from ingressroute %s/%s", ir.Namespace, ir.Name)
			continue
		}

		log.Debugf("Endpoints generated from ingressroute: %s/%s: %v", ir.Namespace, ir.Name, irEndpoints)
		sc.setResourceLabel(ir, irEndpoints)
		endpoints = append(endpoints, irEndpoints...)
	}

	for _, ep := range endpoints {
		sort.Sort(ep.Targets)
	}

	return endpoints, nil
}

func (sc *ingressRouteSource) endpointsFromTemplate(ctx context.Context, ingressRoute *contour.IngressRoute) ([]*endpoint.Endpoint, error) {
	// Process the whole template string
	var buf bytes.Buffer
	err := sc.fqdnTemplate.Execute(&buf, ingressRoute)
	if err != nil {
		return nil, fmt.Errorf("failed to apply template on ingressroute %s/%s: %v", ingressRoute.Namespace, ingressRoute.Name, err)
	}

	hostnames := buf.String()

	ttl, err := getTTLFromAnnotations(ingressRoute.Annotations)
	if err != nil {
		log.Warn(err)
	}

	targets := getTargetsFromTargetAnnotation(ingressRoute.Annotations)

	if len(targets) == 0 {
		targets, err = sc.targetsFromContourLoadBalancer(ctx)
		if err != nil {
			return nil, err
		}
	}

	providerSpecific, setIdentifier := getProviderSpecificAnnotations(ingressRoute.Annotations)

	var endpoints []*endpoint.Endpoint
	// splits the FQDN template and removes the trailing periods
	hostnameList := strings.Split(strings.Replace(hostnames, " ", "", -1), ",")
	for _, hostname := range hostnameList {
		hostname = strings.TrimSuffix(hostname, ".")
		endpoints = append(endpoints, endpointsForHostname(hostname, targets, ttl, providerSpecific, setIdentifier)...)
	}
	return endpoints, nil
}

// filterByAnnotations filters a list of configs by a given annotation selector.
func (sc *ingressRouteSource) filterByAnnotations(ingressRoutes []*contour.IngressRoute) ([]*contour.IngressRoute, error) {
	labelSelector, err := metav1.ParseToLabelSelector(sc.annotationFilter)
	if err != nil {
		return nil, err
	}
	selector, err := metav1.LabelSelectorAsSelector(labelSelector)
	if err != nil {
		return nil, err
	}

	// empty filter returns original list
	if selector.Empty() {
		return ingressRoutes, nil
	}

	filteredList := []*contour.IngressRoute{}

	for _, ingressRoute := range ingressRoutes {
		// convert the ingressroute's annotations to an equivalent label selector
		annotations := labels.Set(ingressRoute.Annotations)

		// include ingressroute if its annotations match the selector
		if selector.Matches(annotations) {
			filteredList = append(filteredList, ingressRoute)
		}
	}

	return filteredList, nil
}

func (sc *ingressRouteSource) setResourceLabel(ingressRoute *contour.IngressRoute, endpoints []*endpoint.Endpoint) {
	for _, ep := range endpoints {
		ep.Labels[endpoint.ResourceLabelKey] = fmt.Sprintf("ingressroute/%s/%s", ingressRoute.Namespace, ingressRoute.Name)
	}
}

func (sc *ingressRouteSource) targetsFromContourLoadBalancer(ctx context.Context) (targets endpoint.Targets, err error) {
	lbNamespace, lbName, err := parseContourLoadBalancerService(sc.contourLoadBalancerService)
	if err != nil {
		return nil, err
	}
	if svc, err := sc.kubeClient.CoreV1().Services(lbNamespace).Get(ctx, lbName, metav1.GetOptions{}); err != nil {
		log.Warn(err)
	} else {
		for _, lb := range svc.Status.LoadBalancer.Ingress {
			if lb.IP != "" {
				targets = append(targets, lb.IP)
			}
			if lb.Hostname != "" {
				targets = append(targets, lb.Hostname)
			}
		}
	}

	return
}

// endpointsFromIngressRoute extracts the endpoints from a Contour IngressRoute object
func (sc *ingressRouteSource) endpointsFromIngressRoute(ctx context.Context, ingressRoute *contour.IngressRoute) ([]*endpoint.Endpoint, error) {
	if ingressRoute.CurrentStatus != "valid" {
		log.Warn(errors.Errorf("cannot generate endpoints for ingressroute with status %s", ingressRoute.CurrentStatus))
		return nil, nil
	}

	var endpoints []*endpoint.Endpoint

	ttl, err := getTTLFromAnnotations(ingressRoute.Annotations)
	if err != nil {
		log.Warn(err)
	}

	targets := getTargetsFromTargetAnnotation(ingressRoute.Annotations)

	if len(targets) == 0 {
		targets, err = sc.targetsFromContourLoadBalancer(ctx)
		if err != nil {
			return nil, err
		}
	}

	providerSpecific, setIdentifier := getProviderSpecificAnnotations(ingressRoute.Annotations)

	if virtualHost := ingressRoute.Spec.VirtualHost; virtualHost != nil {
		if fqdn := virtualHost.Fqdn; fqdn != "" {
			endpoints = append(endpoints, endpointsForHostname(fqdn, targets, ttl, providerSpecific, setIdentifier)...)
		}
	}

	// Skip endpoints if we do not want entries from annotations
	if !sc.ignoreHostnameAnnotation {
		hostnameList := getHostnamesFromAnnotations(ingressRoute.Annotations)
		for _, hostname := range hostnameList {
			endpoints = append(endpoints, endpointsForHostname(hostname, targets, ttl, providerSpecific, setIdentifier)...)
		}
	}

	return endpoints, nil
}

func parseContourLoadBalancerService(service string) (namespace, name string, err error) {
	parts := strings.Split(service, "/")
	if len(parts) != 2 {
		err = fmt.Errorf("invalid contour load balancer service (namespace/name) found '%v'", service)
	} else {
		namespace, name = parts[0], parts[1]
	}

	return
}

func (sc *ingressRouteSource) AddEventHandler(ctx context.Context, handler func()) {
}
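The deleted file's `parseContourLoadBalancerService` expects a single `namespace/name` value. A hypothetical check of that contract, written as a test in the same package, would be:

package source

import "testing"

// TestParseContourLoadBalancerServiceExample is a hypothetical illustration
// of the namespace/name parsing contract shown in the deleted file above.
func TestParseContourLoadBalancerServiceExample(t *testing.T) {
	ns, name, err := parseContourLoadBalancerService("heptio-contour/contour")
	if err != nil || ns != "heptio-contour" || name != "contour" {
		t.Fatalf("unexpected result: %q %q %v", ns, name, err)
	}
	if _, _, err := parseContourLoadBalancerService("missing-separator"); err == nil {
		t.Fatal("expected an error for a value without a namespace/name separator")
	}
}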
File diff suppressed because it is too large
@@ -17,13 +17,11 @@ limitations under the License.
package source

import (
	"bytes"
	"context"
	"fmt"
	"sort"
	"strings"
	"text/template"
	"time"

	log "github.com/sirupsen/logrus"
	networkingv1alpha3 "istio.io/client-go/pkg/apis/networking/v1alpha3"
@@ -66,19 +64,10 @@ func NewIstioGatewaySource(
	combineFQDNAnnotation bool,
	ignoreHostnameAnnotation bool,
) (Source, error) {
	var (
		tmpl *template.Template
		err  error
	)

	if fqdnTemplate != "" {
		tmpl, err = template.New("endpoint").Funcs(template.FuncMap{
			"trimPrefix": strings.TrimPrefix,
		}).Parse(fqdnTemplate)
	tmpl, err := parseTemplate(fqdnTemplate)
	if err != nil {
		return nil, err
	}
	}

	// Use shared informers to listen for add/update/delete of services/pods/nodes in the specified namespace.
	// Set resync period to 0, to prevent processing when nothing has changed
@@ -109,19 +98,11 @@ func NewIstioGatewaySource(
	istioInformerFactory.Start(wait.NeverStop)

	// wait for the local cache to be populated.
	err = poll(time.Second, 60*time.Second, func() (bool, error) {
		return serviceInformer.Informer().HasSynced(), nil
	})
	if err != nil {
		return nil, fmt.Errorf("failed to sync cache: %v", err)
	if err := waitForCacheSync(context.Background(), informerFactory); err != nil {
		return nil, err
	}

	// wait for the local cache to be populated.
	err = poll(time.Second, 60*time.Second, func() (bool, error) {
		return gatewayInformer.Informer().HasSynced(), nil
	})
	if err != nil {
		return nil, fmt.Errorf("failed to sync cache: %v", err)
	if err := waitForCacheSync(context.Background(), istioInformerFactory); err != nil {
		return nil, err
	}

	return &gatewaySource{
@@ -169,7 +150,7 @@ func (sc *gatewaySource) Endpoints(ctx context.Context) ([]*endpoint.Endpoint, error) {

	// apply template if host is missing on gateway
	if (sc.combineFQDNAnnotation || len(gwHostnames) == 0) && sc.fqdnTemplate != nil {
		iHostnames, err := sc.hostNamesFromTemplate(gateway)
		iHostnames, err := execTemplate(sc.fqdnTemplate, &gateway)
		if err != nil {
			return nil, err
		}
@@ -207,19 +188,7 @@ func (sc *gatewaySource) Endpoints(ctx context.Context) ([]*endpoint.Endpoint, error) {
func (sc *gatewaySource) AddEventHandler(ctx context.Context, handler func()) {
	log.Debug("Adding event handler for Istio Gateway")

	sc.gatewayInformer.Informer().AddEventHandler(
		cache.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				handler()
			},
			UpdateFunc: func(old interface{}, new interface{}) {
				handler()
			},
			DeleteFunc: func(obj interface{}) {
				handler()
			},
		},
	)
	sc.gatewayInformer.Informer().AddEventHandler(eventHandlerFunc(handler))
}

// filterByAnnotations filters a list of configs by a given annotation selector.
@@ -345,18 +314,6 @@ func (sc *gatewaySource) hostNamesFromGateway(gateway networkingv1alpha3.Gateway
	return hostnames, nil
}

func (sc *gatewaySource) hostNamesFromTemplate(gateway networkingv1alpha3.Gateway) ([]string, error) {
	// Process the whole template string
	var buf bytes.Buffer
	err := sc.fqdnTemplate.Execute(&buf, gateway)
	if err != nil {
		return nil, fmt.Errorf("failed to apply template on istio gateway %v: %v", gateway, err)
	}

	hostnames := strings.Split(strings.Replace(buf.String(), " ", "", -1), ",")
	return hostnames, nil
}

func gatewaySelectorMatchesServiceSelector(gwSelector, svcSelector map[string]string) bool {
	for k, v := range gwSelector {
		if lbl, ok := svcSelector[k]; !ok || lbl != v {
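The hunks above replace hand-rolled cache polling and `ResourceEventHandlerFuncs` boilerplate with shared `waitForCacheSync` and `eventHandlerFunc` helpers whose definitions fall outside this excerpt. A minimal sketch consistent with the call sites, assuming the typed informers factory and the client-go handler interface of this era (later client-go versions add an `isInInitialList` parameter to `OnAdd`):

package source

import (
	"context"
	"fmt"

	"k8s.io/client-go/informers"
)

// waitForCacheSync blocks until every informer started by the factory reports
// HasSynced, or until the context is cancelled. It starts nothing itself.
func waitForCacheSync(ctx context.Context, factory informers.SharedInformerFactory) error {
	for typ, synced := range factory.WaitForCacheSync(ctx.Done()) {
		if !synced {
			return fmt.Errorf("failed to sync cache for %v", typ)
		}
	}
	return nil
}

// eventHandlerFunc adapts a plain func() to cache.ResourceEventHandler so the
// same callback fires on add, update, and delete.
type eventHandlerFunc func()

func (fn eventHandlerFunc) OnAdd(obj interface{})               { fn() }
func (fn eventHandlerFunc) OnUpdate(oldObj, newObj interface{}) { fn() }
func (fn eventHandlerFunc) OnDelete(obj interface{})            { fn() }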
@@ -26,7 +26,6 @@ import (
	"github.com/stretchr/testify/suite"
	networkingv1alpha3api "istio.io/api/networking/v1alpha3"
	networkingv1alpha3 "istio.io/client-go/pkg/apis/networking/v1alpha3"
	istioclient "istio.io/client-go/pkg/clientset/versioned"
	istiofake "istio.io/client-go/pkg/clientset/versioned/fake"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -46,7 +45,7 @@ type GatewaySuite struct {

func (suite *GatewaySuite) SetupTest() {
	fakeKubernetesClient := fake.NewSimpleClientset()
	fakeIstioClient := NewFakeConfigStore()
	fakeIstioClient := istiofake.NewSimpleClientset()
	var err error

	suite.lbServices = []*v1.Service{
@@ -90,12 +89,16 @@ func (suite *GatewaySuite) TestResourceLabelIsSet() {
}

func TestGateway(t *testing.T) {
	t.Parallel()

	suite.Run(t, new(GatewaySuite))
	t.Run("endpointsFromGatewayConfig", testEndpointsFromGatewayConfig)
	t.Run("Endpoints", testGatewayEndpoints)
}

func TestNewIstioGatewaySource(t *testing.T) {
	t.Parallel()

	for _, ti := range []struct {
		title            string
		annotationFilter string
@@ -134,10 +137,13 @@ func TestNewIstioGatewaySource(t *testing.T) {
			annotationFilter: "kubernetes.io/gateway.class=nginx",
		},
	} {
		ti := ti
		t.Run(ti.title, func(t *testing.T) {
			t.Parallel()

			_, err := NewIstioGatewaySource(
				fake.NewSimpleClientset(),
				NewFakeConfigStore(),
				istiofake.NewSimpleClientset(),
				"",
				ti.annotationFilter,
				ti.fqdnTemplate,
@@ -154,6 +160,8 @@ func TestNewIstioGatewaySource(t *testing.T) {
}

func testEndpointsFromGatewayConfig(t *testing.T) {
	t.Parallel()

	for _, ti := range []struct {
		title      string
		lbServices []fakeIngressGatewayService
@@ -306,7 +314,10 @@ func testEndpointsFromGatewayConfig(t *testing.T) {
			},
		},
	} {
		ti := ti
		t.Run(ti.title, func(t *testing.T) {
			t.Parallel()

			gatewayCfg := ti.config.Config()
			if source, err := newTestGatewaySource(ti.lbServices); err != nil {
				require.NoError(t, err)
@@ -322,6 +333,8 @@ func testEndpointsFromGatewayConfig(t *testing.T) {
}

func testGatewayEndpoints(t *testing.T) {
	t.Parallel()

	for _, ti := range []struct {
		title           string
		targetNamespace string
@@ -1132,7 +1145,9 @@ func testGatewayEndpoints(t *testing.T) {
			},
		},
	} {
		ti := ti
		t.Run(ti.title, func(t *testing.T) {
			t.Parallel()

			fakeKubernetesClient := fake.NewSimpleClientset()

@@ -1142,7 +1157,7 @@ func testGatewayEndpoints(t *testing.T) {
				require.NoError(t, err)
			}

			fakeIstioClient := NewFakeConfigStore()
			fakeIstioClient := istiofake.NewSimpleClientset()
			for _, config := range ti.configItems {
				gatewayCfg := config.Config()
				_, err := fakeIstioClient.NetworkingV1alpha3().Gateways(ti.targetNamespace).Create(context.Background(), &gatewayCfg, metav1.CreateOptions{})
@@ -1175,7 +1190,7 @@ func testGatewayEndpoints(t *testing.T) {
// gateway specific helper functions
func newTestGatewaySource(loadBalancerList []fakeIngressGatewayService) (*gatewaySource, error) {
	fakeKubernetesClient := fake.NewSimpleClientset()
	fakeIstioClient := NewFakeConfigStore()
	fakeIstioClient := istiofake.NewSimpleClientset()

	for _, lb := range loadBalancerList {
		service := lb.Service()
@@ -1276,7 +1291,3 @@ func (c fakeGatewayConfig) Config() networkingv1alpha3.Gateway {

	return gw
}

func NewFakeConfigStore() istioclient.Interface {
	return istiofake.NewSimpleClientset()
}
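These test hunks retire the `NewFakeConfigStore` wrapper in favor of calling `istiofake.NewSimpleClientset()` directly. A hypothetical, self-contained illustration of why no wrapper is needed: the fake clientset implements the full typed Istio API, so tests create Gateways exactly as production code would.

package source

import (
	"context"
	"testing"

	networkingv1alpha3 "istio.io/client-go/pkg/apis/networking/v1alpha3"
	istiofake "istio.io/client-go/pkg/clientset/versioned/fake"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// TestFakeIstioClientExample is a hypothetical sketch, not part of the commit.
func TestFakeIstioClientExample(t *testing.T) {
	client := istiofake.NewSimpleClientset()

	gw := &networkingv1alpha3.Gateway{
		ObjectMeta: metav1.ObjectMeta{Name: "example", Namespace: "istio-system"},
	}
	// The fake client records the object in memory; reads through the same
	// client (or informers built on it) will see it.
	if _, err := client.NetworkingV1alpha3().Gateways("istio-system").Create(context.Background(), gw, metav1.CreateOptions{}); err != nil {
		t.Fatal(err)
	}
}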
@@ -17,13 +17,11 @@ limitations under the License.
package source

import (
	"bytes"
	"context"
	"fmt"
	"sort"
	"strings"
	"text/template"
	"time"

	networkingv1alpha3 "istio.io/client-go/pkg/apis/networking/v1alpha3"

@@ -70,19 +68,10 @@ func NewIstioVirtualServiceSource(
	combineFQDNAnnotation bool,
	ignoreHostnameAnnotation bool,
) (Source, error) {
	var (
		tmpl *template.Template
		err  error
	)

	if fqdnTemplate != "" {
		tmpl, err = template.New("endpoint").Funcs(template.FuncMap{
			"trimPrefix": strings.TrimPrefix,
		}).Parse(fqdnTemplate)
	tmpl, err := parseTemplate(fqdnTemplate)
	if err != nil {
		return nil, err
	}
	}

	// Use shared informers to listen for add/update/delete of services/pods/nodes in the specified namespace.
	// Set resync period to 0, to prevent processing when nothing has changed
@@ -113,18 +102,11 @@ func NewIstioVirtualServiceSource(
	istioInformerFactory.Start(wait.NeverStop)

	// wait for the local cache to be populated.
	err = wait.Poll(time.Second, 60*time.Second, func() (bool, error) {
		return serviceInformer.Informer().HasSynced(), nil
	})
	if err != nil {
		return nil, fmt.Errorf("failed to sync cache: %v", err)
	if err := waitForCacheSync(context.Background(), informerFactory); err != nil {
		return nil, err
	}

	err = wait.Poll(time.Second, 60*time.Second, func() (bool, error) {
		return virtualServiceInformer.Informer().HasSynced(), nil
	})
	if err != nil {
		return nil, fmt.Errorf("failed to sync cache: %v", err)
	if err := waitForCacheSync(context.Background(), istioInformerFactory); err != nil {
		return nil, err
	}

	return &virtualServiceSource{
@@ -205,19 +187,7 @@ func (sc *virtualServiceSource) Endpoints(ctx context.Context) ([]*endpoint.Endpoint, error) {
func (sc *virtualServiceSource) AddEventHandler(ctx context.Context, handler func()) {
	log.Debug("Adding event handler for Istio VirtualService")

	sc.virtualserviceInformer.Informer().AddEventHandler(
		cache.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				handler()
			},
			UpdateFunc: func(old interface{}, new interface{}) {
				handler()
			},
			DeleteFunc: func(obj interface{}) {
				handler()
			},
		},
	)
	sc.virtualserviceInformer.Informer().AddEventHandler(eventHandlerFunc(handler))
}

func (sc *virtualServiceSource) getGateway(ctx context.Context, gatewayStr string, virtualService networkingv1alpha3.VirtualService) *networkingv1alpha3.Gateway {
@@ -249,28 +219,20 @@ func (sc *virtualServiceSource) getGateway(ctx context.Context, gatewayStr string
}

func (sc *virtualServiceSource) endpointsFromTemplate(ctx context.Context, virtualService networkingv1alpha3.VirtualService) ([]*endpoint.Endpoint, error) {
	// Process the whole template string
	var buf bytes.Buffer
	err := sc.fqdnTemplate.Execute(&buf, virtualService)
	hostnames, err := execTemplate(sc.fqdnTemplate, &virtualService)
	if err != nil {
		return nil, fmt.Errorf("failed to apply template on istio config %v: %v", virtualService, err)
		return nil, err
	}

	hostnamesTemplate := buf.String()

	ttl, err := getTTLFromAnnotations(virtualService.Annotations)
	if err != nil {
		log.Warn(err)
	}

	var endpoints []*endpoint.Endpoint

	providerSpecific, setIdentifier := getProviderSpecificAnnotations(virtualService.Annotations)

	// splits the FQDN template and removes the trailing periods
	hostnames := strings.Split(strings.Replace(hostnamesTemplate, " ", "", -1), ",")
	var endpoints []*endpoint.Endpoint
	for _, hostname := range hostnames {
		hostname = strings.TrimSuffix(hostname, ".")
		targets, err := sc.targetsFromVirtualService(ctx, virtualService, hostname)
		if err != nil {
			return endpoints, err
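Several sources in this commit now delegate FQDN templating to shared `parseTemplate` and `execTemplate` helpers. Their definitions are not in this excerpt; a minimal sketch consistent with the call sites and with the inline logic being deleted would be:

package source

import (
	"bytes"
	"fmt"
	"strings"
	"text/template"
)

// parseTemplate compiles an fqdnTemplate flag value. An empty value yields a
// nil template, which callers treat as "templating disabled".
func parseTemplate(fqdnTemplate string) (*template.Template, error) {
	if fqdnTemplate == "" {
		return nil, nil
	}
	return template.New("endpoint").Funcs(template.FuncMap{
		"trimPrefix": strings.TrimPrefix,
	}).Parse(fqdnTemplate)
}

// execTemplate renders the template against a Kubernetes object, then splits
// the comma-separated result into hostnames, dropping spaces and trailing dots.
func execTemplate(tmpl *template.Template, obj interface{}) ([]string, error) {
	var buf bytes.Buffer
	if err := tmpl.Execute(&buf, obj); err != nil {
		return nil, fmt.Errorf("failed to apply template: %v", err)
	}
	var hostnames []string
	for _, name := range strings.Split(strings.ReplaceAll(buf.String(), " ", ""), ",") {
		hostnames = append(hostnames, strings.TrimSuffix(name, "."))
	}
	return hostnames, nil
}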
@@ -28,6 +28,7 @@ import (
	"github.com/stretchr/testify/suite"
	istionetworking "istio.io/api/networking/v1alpha3"
	networkingv1alpha3 "istio.io/client-go/pkg/apis/networking/v1alpha3"
	istiofake "istio.io/client-go/pkg/clientset/versioned/fake"
	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes/fake"

@@ -47,7 +48,7 @@ type VirtualServiceSuite struct {

func (suite *VirtualServiceSuite) SetupTest() {
	fakeKubernetesClient := fake.NewSimpleClientset()
	fakeIstioClient := NewFakeConfigStore()
	fakeIstioClient := istiofake.NewSimpleClientset()
	var err error

	suite.lbServices = []*v1.Service{
@@ -70,17 +71,6 @@ func (suite *VirtualServiceSuite) SetupTest() {
		suite.NoError(err, "should succeed")
	}

	suite.source, err = NewIstioVirtualServiceSource(
		fakeKubernetesClient,
		fakeIstioClient,
		"",
		"",
		"{{.Name}}",
		false,
		false,
	)
	suite.NoError(err, "should initialize virtualservice source")

	suite.gwconfig = (fakeGatewayConfig{
		name:      "foo-gateway-with-targets",
		namespace: "istio-system",
@@ -97,6 +87,17 @@ func (suite *VirtualServiceSuite) SetupTest() {
	}).Config()
	_, err = fakeIstioClient.NetworkingV1alpha3().VirtualServices(suite.vsconfig.Namespace).Create(context.Background(), &suite.vsconfig, metav1.CreateOptions{})
	suite.NoError(err, "should succeed")

	suite.source, err = NewIstioVirtualServiceSource(
		fakeKubernetesClient,
		fakeIstioClient,
		"",
		"",
		"{{.Name}}",
		false,
		false,
	)
	suite.NoError(err, "should initialize virtualservice source")
}

func (suite *VirtualServiceSuite) TestResourceLabelIsSet() {
@@ -109,6 +110,8 @@ func (suite *VirtualServiceSuite) TestResourceLabelIsSet() {
}

func TestVirtualService(t *testing.T) {
	t.Parallel()

	suite.Run(t, new(VirtualServiceSuite))
	t.Run("virtualServiceBindsToGateway", testVirtualServiceBindsToGateway)
	t.Run("endpointsFromVirtualServiceConfig", testEndpointsFromVirtualServiceConfig)
@@ -117,6 +120,8 @@ func TestVirtualService(t *testing.T) {
}

func TestNewIstioVirtualServiceSource(t *testing.T) {
	t.Parallel()

	for _, ti := range []struct {
		title            string
		annotationFilter string
@@ -155,10 +160,13 @@ func TestNewIstioVirtualServiceSource(t *testing.T) {
			annotationFilter: "kubernetes.io/gateway.class=nginx",
		},
	} {
		ti := ti
		t.Run(ti.title, func(t *testing.T) {
			t.Parallel()

			_, err := NewIstioVirtualServiceSource(
				fake.NewSimpleClientset(),
				NewFakeConfigStore(),
				istiofake.NewSimpleClientset(),
				"",
				ti.annotationFilter,
				ti.fqdnTemplate,
@@ -358,6 +366,8 @@ func testVirtualServiceBindsToGateway(t *testing.T) {
}

func testEndpointsFromVirtualServiceConfig(t *testing.T) {
	t.Parallel()

	for _, ti := range []struct {
		title      string
		lbServices []fakeIngressGatewayService
@@ -537,7 +547,10 @@ func testEndpointsFromVirtualServiceConfig(t *testing.T) {
			},
		},
	} {
		ti := ti
		t.Run(ti.title, func(t *testing.T) {
			t.Parallel()

			if source, err := newTestVirtualServiceSource(ti.lbServices, []fakeGatewayConfig{ti.gwconfig}); err != nil {
				require.NoError(t, err)
			} else if endpoints, err := source.endpointsFromVirtualService(context.Background(), ti.vsconfig.Config()); err != nil {
@@ -550,6 +563,8 @@ func testEndpointsFromVirtualServiceConfig(t *testing.T) {
}

func testVirtualServiceEndpoints(t *testing.T) {
	t.Parallel()

	namespace := "testing"
	for _, ti := range []struct {
		title string
@@ -1432,7 +1447,10 @@ func testVirtualServiceEndpoints(t *testing.T) {
			fqdnTemplate: "{{.Name}}.ext-dns.test.com",
		},
	} {
		ti := ti
		t.Run(ti.title, func(t *testing.T) {
			t.Parallel()

			var gateways []networkingv1alpha3.Gateway
			var virtualservices []networkingv1alpha3.VirtualService

@@ -1451,7 +1469,7 @@ func testVirtualServiceEndpoints(t *testing.T) {
				require.NoError(t, err)
			}

			fakeIstioClient := NewFakeConfigStore()
			fakeIstioClient := istiofake.NewSimpleClientset()

			for _, gateway := range gateways {
				_, err := fakeIstioClient.NetworkingV1alpha3().Gateways(gateway.Namespace).Create(context.Background(), &gateway, metav1.CreateOptions{})
@@ -1520,7 +1538,7 @@ func testGatewaySelectorMatchesService(t *testing.T) {

func newTestVirtualServiceSource(loadBalancerList []fakeIngressGatewayService, gwList []fakeGatewayConfig) (*virtualServiceSource, error) {
	fakeKubernetesClient := fake.NewSimpleClientset()
	fakeIstioClient := NewFakeConfigStore()
	fakeIstioClient := istiofake.NewSimpleClientset()

	for _, lb := range loadBalancerList {
		service := lb.Service()
@@ -20,7 +20,6 @@ import (
	"context"
	"fmt"
	"sort"
	"time"

	"github.com/pkg/errors"
	log "github.com/sirupsen/logrus"
@@ -78,11 +77,8 @@ func NewKongTCPIngressSource(dynamicKubeClient dynamic.Interface, kubeClient kubernetes.Interface,
	informerFactory.Start(wait.NeverStop)

	// wait for the local cache to be populated.
	err = poll(time.Second, 60*time.Second, func() (bool, error) {
		return kongTCPIngressInformer.Informer().HasSynced(), nil
	})
	if err != nil {
		return nil, errors.Wrapf(err, "failed to sync cache")
	if err := waitForDynamicCacheSync(context.Background(), informerFactory); err != nil {
		return nil, err
	}

	uc, err := newKongUnstructuredConverter()
@@ -243,19 +239,7 @@ func (sc *kongTCPIngressSource) AddEventHandler(ctx context.Context, handler func()) {

	// Right now there is no way to remove event handler from informer, see:
	// https://github.com/kubernetes/kubernetes/issues/79610
	sc.kongTCPIngressInformer.Informer().AddEventHandler(
		cache.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				handler()
			},
			UpdateFunc: func(old interface{}, new interface{}) {
				handler()
			},
			DeleteFunc: func(obj interface{}) {
				handler()
			},
		},
	)
	sc.kongTCPIngressInformer.Informer().AddEventHandler(eventHandlerFunc(handler))
}

// newUnstructuredConverter returns a new unstructuredConverter initialized
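The Kong source uses a `waitForDynamicCacheSync` variant because its informers come from a dynamic factory rather than a typed one. A sketch under that assumption, mirroring the typed helper shown earlier:

package source

import (
	"context"
	"fmt"

	"k8s.io/client-go/dynamic/dynamicinformer"
)

// waitForDynamicCacheSync is the dynamic-factory twin of waitForCacheSync; a
// minimal sketch, the repository's actual helper may differ.
func waitForDynamicCacheSync(ctx context.Context, factory dynamicinformer.DynamicSharedInformerFactory) error {
	// WaitForCacheSync reports sync status per GroupVersionResource.
	for gvr, synced := range factory.WaitForCacheSync(ctx.Done()) {
		if !synced {
			return fmt.Errorf("failed to sync cache for %v", gvr)
		}
	}
	return nil
}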
@@ -19,6 +19,8 @@ package source
import (
	"context"
	"encoding/json"
	"testing"

	"github.com/stretchr/testify/assert"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -27,7 +29,6 @@ import (
	fakeDynamic "k8s.io/client-go/dynamic/fake"
	fakeKube "k8s.io/client-go/kubernetes/fake"
	"sigs.k8s.io/external-dns/endpoint"
	"testing"
)

// This is a compile-time validation that kongTCPIngressSource is a Source.
@@ -36,6 +37,8 @@ var _ Source = &kongTCPIngressSource{}
const defaultKongNamespace = "kong"

func TestKongTCPIngressEndpoints(t *testing.T) {
	t.Parallel()

	for _, ti := range []struct {
		title    string
		tcpProxy TCPIngress
@@ -218,7 +221,10 @@ func TestKongTCPIngressEndpoints(t *testing.T) {
			},
		},
	} {
		ti := ti
		t.Run(ti.title, func(t *testing.T) {
			t.Parallel()

			fakeKubernetesClient := fakeKube.NewSimpleClientset()
			scheme := runtime.NewScheme()
			scheme.AddKnownTypes(kongGroupdVersionResource.GroupVersion(), &TCPIngress{}, &TCPIngressList{})
@@ -29,6 +29,8 @@ import (
)

func TestMultiSource(t *testing.T) {
	t.Parallel()

	t.Run("Interface", testMultiSourceImplementsSource)
	t.Run("Endpoints", testMultiSourceEndpoints)
	t.Run("EndpointsWithError", testMultiSourceEndpointsWithError)
@@ -71,7 +73,10 @@ func testMultiSourceEndpoints(t *testing.T) {
			[]*endpoint.Endpoint{foo, bar},
		},
	} {
		tc := tc
		t.Run(tc.title, func(t *testing.T) {
			t.Parallel()

			// Prepare the nested mock sources.
			sources := make([]Source, 0, len(tc.nestedEndpoints))
@@ -17,12 +17,9 @@ limitations under the License.
package source

import (
	"bytes"
	"context"
	"fmt"
	"strings"
	"text/template"
	"time"

	log "github.com/sirupsen/logrus"
	v1 "k8s.io/api/core/v1"
@@ -46,19 +43,10 @@ type nodeSource struct {

// NewNodeSource creates a new nodeSource with the given config.
func NewNodeSource(kubeClient kubernetes.Interface, annotationFilter, fqdnTemplate string) (Source, error) {
	var (
		tmpl *template.Template
		err  error
	)

	if fqdnTemplate != "" {
		tmpl, err = template.New("endpoint").Funcs(template.FuncMap{
			"trimPrefix": strings.TrimPrefix,
		}).Parse(fqdnTemplate)
	tmpl, err := parseTemplate(fqdnTemplate)
	if err != nil {
		return nil, err
	}
	}

	// Use shared informers to listen for add/update/delete of nodes.
	// Set resync period to 0, to prevent processing when nothing has changed
@@ -78,11 +66,8 @@ func NewNodeSource(kubeClient kubernetes.Interface, annotationFilter, fqdnTemplate string) (Source, error) {
	informerFactory.Start(wait.NeverStop)

	// wait for the local cache to be populated.
	err = poll(time.Second, 60*time.Second, func() (bool, error) {
		return nodeInformer.Informer().HasSynced(), nil
	})
	if err != nil {
		return nil, fmt.Errorf("failed to sync cache: %v", err)
	if err := waitForCacheSync(context.Background(), informerFactory); err != nil {
		return nil, err
	}

	return &nodeSource{
@@ -131,14 +116,15 @@ func (ns *nodeSource) Endpoints(ctx context.Context) ([]*endpoint.Endpoint, error) {
	}

	if ns.fqdnTemplate != nil {
		// Process the whole template string
		var buf bytes.Buffer
		err := ns.fqdnTemplate.Execute(&buf, node)
		hostnames, err := execTemplate(ns.fqdnTemplate, node)
		if err != nil {
			return nil, fmt.Errorf("failed to apply template on node %s: %v", node.Name, err)
			return nil, err
		}

		ep.DNSName = buf.String()
		hostname := ""
		if len(hostnames) > 0 {
			hostname = hostnames[0]
		}
		ep.DNSName = hostname
		log.Debugf("applied template for %s, converting to %s", node.Name, ep.DNSName)
	} else {
		ep.DNSName = node.Name
@@ -30,12 +30,16 @@ import (
)

func TestNodeSource(t *testing.T) {
	t.Parallel()

	t.Run("NewNodeSource", testNodeSourceNewNodeSource)
	t.Run("Endpoints", testNodeSourceEndpoints)
}

// testNodeSourceNewNodeSource tests that NewNodeSource doesn't return an error.
func testNodeSourceNewNodeSource(t *testing.T) {
	t.Parallel()

	for _, ti := range []struct {
		title            string
		annotationFilter string
@@ -62,7 +66,10 @@ func testNodeSourceNewNodeSource(t *testing.T) {
			annotationFilter: "kubernetes.io/ingress.class=nginx",
		},
	} {
		ti := ti
		t.Run(ti.title, func(t *testing.T) {
			t.Parallel()

			_, err := NewNodeSource(
				fake.NewSimpleClientset(),
				ti.annotationFilter,
@@ -80,6 +87,8 @@ func testNodeSourceNewNodeSource(t *testing.T) {

// testNodeSourceEndpoints tests that various nodes generate the correct endpoints.
func testNodeSourceEndpoints(t *testing.T) {
	t.Parallel()

	for _, tc := range []struct {
		title            string
		annotationFilter string
@@ -321,7 +330,10 @@ func testNodeSourceEndpoints(t *testing.T) {
			false,
		},
	} {
		tc := tc
		t.Run(tc.title, func(t *testing.T) {
			t.Parallel()

			// Create a Kubernetes testing client
			kubernetes := fake.NewSimpleClientset()

@@ -17,13 +17,10 @@ limitations under the License.
package source

import (
	"bytes"
	"context"
	"fmt"
	"sort"
	"strings"
	"text/template"
	"time"

	routev1 "github.com/openshift/api/route/v1"
	versioned "github.com/openshift/client-go/route/clientset/versioned"
@@ -51,6 +48,8 @@ type ocpRouteSource struct {
	combineFQDNAnnotation    bool
	ignoreHostnameAnnotation bool
	routeInformer            routeInformer.RouteInformer
	labelSelector            labels.Selector
	ocpRouterName            string
}

// NewOcpRouteSource creates a new ocpRouteSource with the given config.
@@ -61,27 +60,21 @@ func NewOcpRouteSource(
	fqdnTemplate string,
	combineFQDNAnnotation bool,
	ignoreHostnameAnnotation bool,
	labelSelector labels.Selector,
	ocpRouterName string,
) (Source, error) {
	var (
		tmpl *template.Template
		err  error
	)
	if fqdnTemplate != "" {
		tmpl, err = template.New("endpoint").Funcs(template.FuncMap{
			"trimPrefix": strings.TrimPrefix,
		}).Parse(fqdnTemplate)
	tmpl, err := parseTemplate(fqdnTemplate)
	if err != nil {
		return nil, err
	}
	}

	// Use a shared informer to listen for add/update/delete of Routes in the specified namespace.
	// Set resync period to 0, to prevent processing when nothing has changed.
	informerFactory := extInformers.NewFilteredSharedInformerFactory(ocpClient, 0, namespace, nil)
	routeInformer := informerFactory.Route().V1().Routes()
	informerFactory := extInformers.NewSharedInformerFactoryWithOptions(ocpClient, 0, extInformers.WithNamespace(namespace))
	informer := informerFactory.Route().V1().Routes()

	// Add default resource event handlers to properly initialize informer.
	routeInformer.Informer().AddEventHandler(
	informer.Informer().AddEventHandler(
		cache.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
			},
@@ -92,11 +85,8 @@ func NewOcpRouteSource(
	informerFactory.Start(wait.NeverStop)

	// wait for the local cache to be populated.
	err = poll(time.Second, 60*time.Second, func() (bool, error) {
		return routeInformer.Informer().HasSynced(), nil
	})
	if err != nil {
		return nil, fmt.Errorf("failed to sync cache: %v", err)
	if err := waitForCacheSync(context.Background(), informerFactory); err != nil {
		return nil, err
	}

	return &ocpRouteSource{
@@ -106,7 +96,9 @@ func NewOcpRouteSource(
		fqdnTemplate:             tmpl,
		combineFQDNAnnotation:    combineFQDNAnnotation,
		ignoreHostnameAnnotation: ignoreHostnameAnnotation,
		routeInformer:            routeInformer,
		routeInformer:            informer,
		labelSelector:            labelSelector,
		ocpRouterName:            ocpRouterName,
	}, nil
}

@@ -118,7 +110,7 @@ func (ors *ocpRouteSource) AddEventHandler(ctx context.Context, handler func()) {
// Retrieves all OpenShift Route resources on all namespaces, unless an explicit namespace
// is specified in ocpRouteSource.
func (ors *ocpRouteSource) Endpoints(ctx context.Context) ([]*endpoint.Endpoint, error) {
	ocpRoutes, err := ors.routeInformer.Lister().Routes(ors.namespace).List(labels.Everything())
	ocpRoutes, err := ors.routeInformer.Lister().Routes(ors.namespace).List(ors.labelSelector)
	if err != nil {
		return nil, err
	}
@@ -139,7 +131,7 @@ func (ors *ocpRouteSource) Endpoints(ctx context.Context) ([]*endpoint.Endpoint, error) {
			continue
		}

		orEndpoints := endpointsFromOcpRoute(ocpRoute, ors.ignoreHostnameAnnotation)
		orEndpoints := ors.endpointsFromOcpRoute(ocpRoute, ors.ignoreHostnameAnnotation)

		// apply template if host is missing on OpenShift Route
		if (ors.combineFQDNAnnotation || len(orEndpoints) == 0) && ors.fqdnTemplate != nil {
@@ -173,33 +165,25 @@ func (ors *ocpRouteSource) Endpoints(ctx context.Context) ([]*endpoint.Endpoint, error) {
}

func (ors *ocpRouteSource) endpointsFromTemplate(ocpRoute *routev1.Route) ([]*endpoint.Endpoint, error) {
	// Process the whole template string
	var buf bytes.Buffer
	err := ors.fqdnTemplate.Execute(&buf, ocpRoute)
	hostnames, err := execTemplate(ors.fqdnTemplate, ocpRoute)
	if err != nil {
		return nil, fmt.Errorf("failed to apply template on OpenShift Route %s: %s", ocpRoute.Name, err)
		return nil, err
	}

	hostnames := buf.String()

	ttl, err := getTTLFromAnnotations(ocpRoute.Annotations)
	if err != nil {
		log.Warn(err)
	}

	targets := getTargetsFromTargetAnnotation(ocpRoute.Annotations)

	if len(targets) == 0 {
		targets = targetsFromOcpRouteStatus(ocpRoute.Status)
		targets = ors.targetsFromOcpRouteStatus(ocpRoute.Status)
	}

	providerSpecific, setIdentifier := getProviderSpecificAnnotations(ocpRoute.Annotations)

	var endpoints []*endpoint.Endpoint
	// splits the FQDN template and removes the trailing periods
	hostnameList := strings.Split(strings.Replace(hostnames, " ", "", -1), ",")
	for _, hostname := range hostnameList {
		hostname = strings.TrimSuffix(hostname, ".")
	for _, hostname := range hostnames {
		endpoints = append(endpoints, endpointsForHostname(hostname, targets, ttl, providerSpecific, setIdentifier)...)
	}
	return endpoints, nil
@@ -242,7 +226,7 @@ func (ors *ocpRouteSource) setResourceLabel(ocpRoute *routev1.Route, endpoints []*endpoint.Endpoint) {
}

// endpointsFromOcpRoute extracts the endpoints from an OpenShift Route object
func endpointsFromOcpRoute(ocpRoute *routev1.Route, ignoreHostnameAnnotation bool) []*endpoint.Endpoint {
func (ors *ocpRouteSource) endpointsFromOcpRoute(ocpRoute *routev1.Route, ignoreHostnameAnnotation bool) []*endpoint.Endpoint {
	var endpoints []*endpoint.Endpoint

	ttl, err := getTTLFromAnnotations(ocpRoute.Annotations)
@@ -253,7 +237,7 @@ func endpointsFromOcpRoute(ocpRoute *routev1.Route, ignoreHostnameAnnotation bool) []*endpoint.Endpoint {
	targets := getTargetsFromTargetAnnotation(ocpRoute.Annotations)

	if len(targets) == 0 {
		targets = targetsFromOcpRouteStatus(ocpRoute.Status)
		targets = ors.targetsFromOcpRouteStatus(ocpRoute.Status)
	}

	providerSpecific, setIdentifier := getProviderSpecificAnnotations(ocpRoute.Annotations)
@@ -272,14 +256,18 @@ func endpointsFromOcpRoute(ocpRoute *routev1.Route, ignoreHostnameAnnotation bool) []*endpoint.Endpoint {
	return endpoints
}

func targetsFromOcpRouteStatus(status routev1.RouteStatus) endpoint.Targets {
func (ors *ocpRouteSource) targetsFromOcpRouteStatus(status routev1.RouteStatus) endpoint.Targets {
	var targets endpoint.Targets

	for _, ing := range status.Ingress {
		if ing.RouterCanonicalHostname != "" {
		if len(ors.ocpRouterName) != 0 {
			if ing.RouterName == ors.ocpRouterName {
				targets = append(targets, ing.RouterCanonicalHostname)
				return targets
			}
		} else if ing.RouterCanonicalHostname != "" {
			targets = append(targets, ing.RouterCanonicalHostname)
			return targets
		}
	}

	return targets
}
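The new `ocpRouterName` filter above changes which route ingress contributes a DNS target: when set, only the ingress whose `RouterName` matches is used; otherwise the first non-empty canonical hostname wins. A hypothetical walk-through in the same package (names taken from the hunk above):

package source

import (
	"fmt"

	routev1 "github.com/openshift/api/route/v1"
)

// ExampleTargetsFromOcpRouteStatus is an illustrative sketch, not part of the commit.
func ExampleTargetsFromOcpRouteStatus() {
	src := &ocpRouteSource{ocpRouterName: "default"}
	status := routev1.RouteStatus{
		Ingress: []routev1.RouteIngress{
			{RouterName: "test", RouterCanonicalHostname: "router-test.my-domain.com"},
			{RouterName: "default", RouterCanonicalHostname: "router-default.my-domain.com"},
		},
	}
	// Only the "default" router's hostname is selected.
	fmt.Println([]string(src.targetsFromOcpRouteStatus(status)))
	// Output: [router-default.my-domain.com]
}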
@@ -19,11 +19,11 @@ package source
import (
	"context"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"github.com/stretchr/testify/suite"
	"k8s.io/apimachinery/pkg/labels"

	routev1 "github.com/openshift/api/route/v1"
	fake "github.com/openshift/client-go/route/clientset/versioned/fake"
@@ -49,6 +49,8 @@ func (suite *OCPRouteSuite) SetupTest() {
		"{{.Name}}",
		false,
		false,
		labels.Everything(),
		"",
	)

	suite.routeWithTargets = &routev1.Route{
@@ -83,6 +85,8 @@ func (suite *OCPRouteSuite) TestResourceLabelIsSet() {
}

func TestOcpRouteSource(t *testing.T) {
	t.Parallel()

	suite.Run(t, new(OCPRouteSuite))
	t.Run("Interface", testOcpRouteSourceImplementsSource)
	t.Run("NewOcpRouteSource", testOcpRouteSourceNewOcpRouteSource)
@@ -96,11 +100,14 @@ func testOcpRouteSourceImplementsSource(t *testing.T) {

// testOcpRouteSourceNewOcpRouteSource tests that NewOcpRouteSource doesn't return an error.
func testOcpRouteSourceNewOcpRouteSource(t *testing.T) {
	t.Parallel()

	for _, ti := range []struct {
		title            string
		annotationFilter string
		fqdnTemplate     string
		expectError      bool
		labelFilter      string
	}{
		{
			title: "invalid template",
@@ -121,8 +128,18 @@ func testOcpRouteSourceNewOcpRouteSource(t *testing.T) {
			expectError:      false,
			annotationFilter: "kubernetes.io/ingress.class=nginx",
		},
		{
			title:       "valid label selector",
			expectError: false,
			labelFilter: "app=web-external",
		},
	} {
		ti := ti
		labelSelector, err := labels.Parse(ti.labelFilter)
		require.NoError(t, err)
		t.Run(ti.title, func(t *testing.T) {
			t.Parallel()

			_, err := NewOcpRouteSource(
				fake.NewSimpleClientset(),
				"",
@@ -130,6 +147,8 @@ func testOcpRouteSourceNewOcpRouteSource(t *testing.T) {
				ti.fqdnTemplate,
				false,
				false,
				labelSelector,
				"",
			)

			if ti.expectError {
@@ -152,6 +171,8 @@ func testOcpRouteSourceEndpoints(t *testing.T) {
		ocpRoute      *routev1.Route
		expected      []*endpoint.Endpoint
		expectError   bool
		labelFilter   string
		ocpRouterName string
	}{
		{
			title: "route with basic hostname and route status target",
@@ -176,6 +197,7 @@ func testOcpRouteSourceEndpoints(t *testing.T) {
					},
				},
			},
			ocpRouterName: "",
			expected: []*endpoint.Endpoint{
				{
					DNSName: "my-domain.com",
@@ -186,6 +208,119 @@ func testOcpRouteSourceEndpoints(t *testing.T) {
			},
			expectError: false,
		},
		{
			title:                    "route with basic hostname and route status target with one RouterCanonicalHostname and one ocpRouterNames defined",
			targetNamespace:          "",
			annotationFilter:         "",
			fqdnTemplate:             "",
			ignoreHostnameAnnotation: false,
			ocpRoute: &routev1.Route{
				Spec: routev1.RouteSpec{
					Host: "my-domain.com",
				},
				ObjectMeta: metav1.ObjectMeta{
					Namespace:   "default",
					Name:        "route-with-target",
					Annotations: map[string]string{},
				},
				Status: routev1.RouteStatus{
					Ingress: []routev1.RouteIngress{
						{
							RouterName:              "default",
							RouterCanonicalHostname: "router-default.my-domain.com",
						},
					},
				},
			},
			ocpRouterName: "default",
			expected: []*endpoint.Endpoint{
				{
					DNSName: "my-domain.com",
					Targets: []string{
						"router-default.my-domain.com",
					},
				},
			},
			expectError: false,
		},
		{
			title:                    "route with basic hostname and route status target with one RouterCanonicalHostname and one ocpRouterNames defined and two router canonical names",
			targetNamespace:          "",
			annotationFilter:         "",
			fqdnTemplate:             "",
			ignoreHostnameAnnotation: false,
			ocpRoute: &routev1.Route{
				Spec: routev1.RouteSpec{
					Host: "my-domain.com",
				},
				ObjectMeta: metav1.ObjectMeta{
					Namespace:   "default",
					Name:        "route-with-target",
					Annotations: map[string]string{},
				},
				Status: routev1.RouteStatus{
					Ingress: []routev1.RouteIngress{
						{
							RouterName:              "default",
							RouterCanonicalHostname: "router-default.my-domain.com",
						},
						{
							RouterName:              "test",
							RouterCanonicalHostname: "router-test.my-domain.com",
						},
					},
				},
			},
			ocpRouterName: "default",
			expected: []*endpoint.Endpoint{
				{
					DNSName: "my-domain.com",
					Targets: []string{
						"router-default.my-domain.com",
					},
				},
			},
			expectError: false,
		},
		{
			title:                    "route with basic hostname and route status target with one RouterCanonicalHostname and one ocpRouterName defined and two router canonical names",
			targetNamespace:          "",
			annotationFilter:         "",
			fqdnTemplate:             "",
			ignoreHostnameAnnotation: false,
			ocpRoute: &routev1.Route{
				Spec: routev1.RouteSpec{
					Host: "my-domain.com",
				},
				ObjectMeta: metav1.ObjectMeta{
					Namespace:   "default",
					Name:        "route-with-target",
					Annotations: map[string]string{},
				},
				Status: routev1.RouteStatus{
					Ingress: []routev1.RouteIngress{
						{
							RouterName:              "default",
							RouterCanonicalHostname: "router-default.my-domain.com",
						},
						{
							RouterName:              "test",
							RouterCanonicalHostname: "router-test.my-domain.com",
						},
					},
				},
			},
			ocpRouterName: "default",
			expected: []*endpoint.Endpoint{
				{
					DNSName: "my-domain.com",
					Targets: []string{
						"router-default.my-domain.com",
					},
				},
			},
			expectError: false,
		},
		{
			title:           "route with incorrect externalDNS controller annotation",
			targetNamespace: "",
@@ -201,6 +336,7 @@ func testOcpRouteSourceEndpoints(t *testing.T) {
					},
				},
			},
			ocpRouterName: "",
			expected:      []*endpoint.Endpoint{},
			expectError:   false,
		},
@@ -222,6 +358,7 @@ func testOcpRouteSourceEndpoints(t *testing.T) {
					},
				},
			},
			ocpRouterName: "",
			expected: []*endpoint.Endpoint{
				{
					DNSName: "my-annotation-domain.com",
@@ -232,14 +369,75 @@ func testOcpRouteSourceEndpoints(t *testing.T) {
			},
			expectError: false,
		},
		{
			title:                    "route with matching labels",
			labelFilter:              "app=web-external",
			ignoreHostnameAnnotation: false,
			ocpRoute: &routev1.Route{

				Spec: routev1.RouteSpec{
					Host: "my-annotation-domain.com",
				},
				ObjectMeta: metav1.ObjectMeta{
					Namespace: "default",
					Name:      "route-with-matching-labels",
					Annotations: map[string]string{
						"external-dns.alpha.kubernetes.io/target": "my.site.foo.com",
					},
					Labels: map[string]string{
						"app":  "web-external",
						"name": "service-frontend",
					},
				},
			},
			ocpRouterName: "",
			expected: []*endpoint.Endpoint{
				{
					DNSName: "my-annotation-domain.com",
					Targets: []string{
						"my.site.foo.com",
					},
				},
			},
			expectError: false,
		},
		{
			title:                    "route without matching labels",
			labelFilter:              "app=web-external",
			ignoreHostnameAnnotation: false,
			ocpRoute: &routev1.Route{

				Spec: routev1.RouteSpec{
					Host: "my-annotation-domain.com",
				},
				ObjectMeta: metav1.ObjectMeta{
					Namespace: "default",
					Name:      "route-without-matching-labels",
					Annotations: map[string]string{
						"external-dns.alpha.kubernetes.io/target": "my.site.foo.com",
					},
					Labels: map[string]string{
						"app":  "web-internal",
						"name": "service-frontend",
					},
				},
			},
			ocpRouterName: "",
			expected:      []*endpoint.Endpoint{},
			expectError:   false,
		},
	} {
		tc := tc
		t.Run(tc.title, func(t *testing.T) {
			t.Parallel()
			// Create a Kubernetes testing client
			fakeClient := fake.NewSimpleClientset()

			_, err := fakeClient.RouteV1().Routes(tc.ocpRoute.Namespace).Create(context.Background(), tc.ocpRoute, metav1.CreateOptions{})
			require.NoError(t, err)

			labelSelector, err := labels.Parse(tc.labelFilter)
			require.NoError(t, err)

			source, err := NewOcpRouteSource(
				fakeClient,
				"",
@@ -247,21 +445,13 @@ func testOcpRouteSourceEndpoints(t *testing.T) {
				"{{.Name}}",
				false,
				false,
				labelSelector,
				tc.ocpRouterName,
			)

			require.NoError(t, err)

			var res []*endpoint.Endpoint

			// wait up to a few seconds for new resources to appear in informer cache.
			err = poll(time.Second, 3*time.Second, func() (bool, error) {
				res, err = source.Endpoints(context.Background())
				if err != nil {
					// stop waiting if we get an error
					return true, err
				}
				return len(res) >= len(tc.expected), nil
			})

			res, err := source.Endpoints(context.Background())
			if tc.expectError {
				require.Error(t, err)
			} else {
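The new label-filter plumbing these tests exercise rests on `labels.Parse`, where an empty string yields a match-everything selector, so "no filter" needs no special case. A small standalone demonstration:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	selector, err := labels.Parse("app=web-external")
	if err != nil {
		panic(err)
	}
	routeLabels := labels.Set{"app": "web-external", "name": "service-frontend"}
	fmt.Println(selector.Matches(routeLabels)) // true

	// An empty expression parses to an empty selector that matches everything.
	everything, _ := labels.Parse("")
	fmt.Println(everything.Empty()) // true
}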
@@ -18,8 +18,6 @@ package source

import (
	"context"
	"fmt"
	"time"

	"sigs.k8s.io/external-dns/endpoint"

@@ -63,12 +61,8 @@ func NewPodSource(kubeClient kubernetes.Interface, namespace string, compatibility string) (Source, error) {
	informerFactory.Start(wait.NeverStop)

	// wait for the local cache to be populated.
	err := poll(time.Second, 60*time.Second, func() (bool, error) {
		return podInformer.Informer().HasSynced() &&
			nodeInformer.Informer().HasSynced(), nil
	})
	if err != nil {
		return nil, fmt.Errorf("failed to sync cache: %v", err)
	if err := waitForCacheSync(context.Background(), informerFactory); err != nil {
		return nil, err
	}

	return &podSource{
@@ -29,6 +29,8 @@ import (

// testPodSource tests that various services generate the correct endpoints.
func TestPodSource(t *testing.T) {
	t.Parallel()

	for _, tc := range []struct {
		title           string
		targetNamespace string
@@ -387,7 +389,10 @@ func TestPodSource(t *testing.T) {
			},
		},
	} {
		tc := tc
		t.Run(tc.title, func(t *testing.T) {
			t.Parallel()

			// Create a Kubernetes testing client
			kubernetes := fake.NewSimpleClientset()
			ctx := context.Background()
@@ -17,13 +17,11 @@ limitations under the License.
package source

import (
	"bytes"
	"context"
	"fmt"
	"sort"
	"strings"
	"text/template"
	"time"

	log "github.com/sirupsen/logrus"
	v1 "k8s.io/api/core/v1"
@@ -65,22 +63,15 @@ type serviceSource struct {
	podInformer       coreinformers.PodInformer
	nodeInformer      coreinformers.NodeInformer
	serviceTypeFilter map[string]struct{}
	labelSelector     labels.Selector
}

// NewServiceSource creates a new serviceSource with the given config.
func NewServiceSource(kubeClient kubernetes.Interface, namespace, annotationFilter string, fqdnTemplate string, combineFqdnAnnotation bool, compatibility string, publishInternal bool, publishHostIP bool, alwaysPublishNotReadyAddresses bool, serviceTypeFilter []string, ignoreHostnameAnnotation bool) (Source, error) {
	var (
		tmpl *template.Template
		err  error
	)
	if fqdnTemplate != "" {
		tmpl, err = template.New("endpoint").Funcs(template.FuncMap{
			"trimPrefix": strings.TrimPrefix,
		}).Parse(fqdnTemplate)
func NewServiceSource(kubeClient kubernetes.Interface, namespace, annotationFilter string, fqdnTemplate string, combineFqdnAnnotation bool, compatibility string, publishInternal bool, publishHostIP bool, alwaysPublishNotReadyAddresses bool, serviceTypeFilter []string, ignoreHostnameAnnotation bool, labelSelector labels.Selector) (Source, error) {
	tmpl, err := parseTemplate(fqdnTemplate)
	if err != nil {
		return nil, err
	}
	}

	// Use shared informers to listen for add/update/delete of services/pods/nodes in the specified namespace.
	// Set resync period to 0, to prevent processing when nothing has changed
@@ -120,14 +111,8 @@ func NewServiceSource(kubeClient kubernetes.Interface, namespace, annotationFilt
	informerFactory.Start(wait.NeverStop)

	// wait for the local cache to be populated.
	err = poll(time.Second, 60*time.Second, func() (bool, error) {
		return serviceInformer.Informer().HasSynced() &&
			endpointsInformer.Informer().HasSynced() &&
			podInformer.Informer().HasSynced() &&
			nodeInformer.Informer().HasSynced(), nil
	})
	if err != nil {
		return nil, fmt.Errorf("failed to sync cache: %v", err)
	if err := waitForCacheSync(context.Background(), informerFactory); err != nil {
		return nil, err
	}

	// Transform the slice into a map so it will
@@ -153,12 +138,13 @@ func NewServiceSource(kubeClient kubernetes.Interface, namespace, annotationFilt
		podInformer:       podInformer,
		nodeInformer:      nodeInformer,
		serviceTypeFilter: serviceTypes,
		labelSelector:     labelSelector,
	}, nil
}

// Endpoints returns endpoint objects for each service that should be processed.
func (sc *serviceSource) Endpoints(ctx context.Context) ([]*endpoint.Endpoint, error) {
	services, err := sc.serviceInformer.Lister().Services(sc.namespace).List(labels.Everything())
	services, err := sc.serviceInformer.Lister().Services(sc.namespace).List(sc.labelSelector)
	if err != nil {
		return nil, err
	}
@@ -237,6 +223,7 @@ func (sc *serviceSource) Endpoints(ctx context.Context) ([]*endpoint.Endpoint, e
			lastMergedEndpoint := len(mergedEndpoints) - 1
			if mergedEndpoints[lastMergedEndpoint].DNSName == endpoints[i].DNSName &&
				mergedEndpoints[lastMergedEndpoint].RecordType == endpoints[i].RecordType &&
				mergedEndpoints[lastMergedEndpoint].SetIdentifier == endpoints[i].SetIdentifier &&
				mergedEndpoints[lastMergedEndpoint].RecordTTL == endpoints[i].RecordTTL {
				mergedEndpoints[lastMergedEndpoint].Targets = append(mergedEndpoints[lastMergedEndpoint].Targets, endpoints[i].Targets[0])
			} else {
@@ -353,18 +340,15 @@ func (sc *serviceSource) extractHeadlessEndpoints(svc *v1.Service, hostname stri
}

func (sc *serviceSource) endpointsFromTemplate(svc *v1.Service) ([]*endpoint.Endpoint, error) {
	var endpoints []*endpoint.Endpoint

	// Process the whole template string
	var buf bytes.Buffer
	err := sc.fqdnTemplate.Execute(&buf, svc)
	hostnames, err := execTemplate(sc.fqdnTemplate, svc)
	if err != nil {
		return nil, fmt.Errorf("failed to apply template on service %s: %v", svc.String(), err)
		return nil, err
	}

	providerSpecific, setIdentifier := getProviderSpecificAnnotations(svc.Annotations)
	hostnameList := strings.Split(strings.Replace(buf.String(), " ", "", -1), ",")
	for _, hostname := range hostnameList {

	var endpoints []*endpoint.Endpoint
	for _, hostname := range hostnames {
		endpoints = append(endpoints, sc.generateEndpoints(svc, hostname, providerSpecific, setIdentifier, false)...)
	}

@@ -670,17 +654,5 @@ func (sc *serviceSource) AddEventHandler(ctx context.Context, handler func()) {

	// Right now there is no way to remove event handler from informer, see:
	// https://github.com/kubernetes/kubernetes/issues/79610
	sc.serviceInformer.Informer().AddEventHandler(
		cache.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				handler()
			},
			UpdateFunc: func(old interface{}, new interface{}) {
				handler()
			},
			DeleteFunc: func(obj interface{}) {
				handler()
			},
		},
	)
	sc.serviceInformer.Informer().AddEventHandler(eventHandlerFunc(handler))
}

File diff suppressed because it is too large
@@ -17,27 +17,46 @@ limitations under the License.
package source

import (
	"sort"
	"strings"
	"testing"
	"reflect"
	"sort"
	"testing"

	"sigs.k8s.io/external-dns/endpoint"
)

// test helper functions
func sortEndpoints(endpoints []*endpoint.Endpoint) {
	for _, ep := range endpoints {
		sort.Strings([]string(ep.Targets))
	}
	sort.Slice(endpoints, func(i, k int) bool {
		// Sort by DNSName and Targets
		ei, ek := endpoints[i], endpoints[k]
		if ei.DNSName != ek.DNSName {
			return ei.DNSName < ek.DNSName
		}
		// Targets are sorted ahead of time.
		for j, ti := range ei.Targets {
			if j >= len(ek.Targets) {
				return true
			}
			if tk := ek.Targets[j]; ti != tk {
				return ti < tk
			}
		}
		return false
	})
}

func validateEndpoints(t *testing.T, endpoints, expected []*endpoint.Endpoint) {
	t.Helper()

	if len(endpoints) != len(expected) {
		t.Fatalf("expected %d endpoints, got %d", len(expected), len(endpoints))
	}

	// Make sure endpoints are sorted - validateEndpoint() depends on it.
	sort.SliceStable(endpoints, func(i, j int) bool {
		return strings.Compare(endpoints[i].DNSName, endpoints[j].DNSName) < 0
	})
	sort.SliceStable(expected, func(i, j int) bool {
		return strings.Compare(expected[i].DNSName, expected[j].DNSName) < 0
	})
	sortEndpoints(endpoints)
	sortEndpoints(expected)

	for i := range endpoints {
		validateEndpoint(t, endpoints[i], expected[i])
@@ -45,25 +64,36 @@ func validateEndpoints(t *testing.T, endpoints, expected []*endpoint.Endpoint) {
}

func validateEndpoint(t *testing.T, endpoint, expected *endpoint.Endpoint) {
	t.Helper()

	if endpoint.DNSName != expected.DNSName {
		t.Errorf("expected %s, got %s", expected.DNSName, endpoint.DNSName)
		t.Errorf("DNSName expected %q, got %q", expected.DNSName, endpoint.DNSName)
	}

	if !endpoint.Targets.Same(expected.Targets) {
		t.Errorf("expected %s, got %s", expected.Targets, endpoint.Targets)
		t.Errorf("Targets expected %q, got %q", expected.Targets, endpoint.Targets)
	}

	if endpoint.RecordTTL != expected.RecordTTL {
		t.Errorf("expected %v, got %v", expected.RecordTTL, endpoint.RecordTTL)
		t.Errorf("RecordTTL expected %v, got %v", expected.RecordTTL, endpoint.RecordTTL)
	}

	// if non-empty record type is expected, check that it matches.
	if expected.RecordType != "" && endpoint.RecordType != expected.RecordType {
		t.Errorf("expected %s, got %s", expected.RecordType, endpoint.RecordType)
		t.Errorf("RecordType expected %q, got %q", expected.RecordType, endpoint.RecordType)
	}

	// if non-empty labels are expected, check that they matches.
	if expected.Labels != nil && !reflect.DeepEqual(endpoint.Labels,expected.Labels) {
		t.Errorf("expected %s, got %s", expected.Labels, endpoint.Labels)
	if expected.Labels != nil && !reflect.DeepEqual(endpoint.Labels, expected.Labels) {
		t.Errorf("Labels expected %s, got %s", expected.Labels, endpoint.Labels)
	}

	if (len(expected.ProviderSpecific) != 0 || len(endpoint.ProviderSpecific) != 0) &&
		!reflect.DeepEqual(endpoint.ProviderSpecific, expected.ProviderSpecific) {
		t.Errorf("ProviderSpecific expected %s, got %s", expected.ProviderSpecific, endpoint.ProviderSpecific)
	}

	if endpoint.SetIdentifier != expected.SetIdentifier {
		t.Errorf("SetIdentifier expected %q, got %q", expected.SetIdentifier, endpoint.SetIdentifier)
	}
}

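As a usage sketch (not part of this diff): a table-driven source test typically finishes by handing the actual and expected endpoints to the helper above. The test name here is hypothetical; endpoint.Targets is the slice-of-strings type from sigs.k8s.io/external-dns/endpoint.

func TestExampleSource(t *testing.T) {
	got := []*endpoint.Endpoint{
		{DNSName: "a.example.org", Targets: endpoint.Targets{"8.8.8.8", "1.1.1.1"}},
	}
	want := []*endpoint.Endpoint{
		{DNSName: "a.example.org", Targets: endpoint.Targets{"1.1.1.1", "8.8.8.8"}},
	}
	// Target order no longer matters: validateEndpoints sorts both slices
	// (targets first, then endpoints by DNSName and targets) before the
	// element-wise comparison.
	validateEndpoints(t, got, want)
}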
@@ -190,15 +190,6 @@ func (cli *routeGroupClient) do(req *http.Request) (*http.Response, error) {
	return cli.client.Do(req)
}

func parseTemplate(fqdnTemplate string) (tmpl *template.Template, err error) {
	if fqdnTemplate != "" {
		tmpl, err = template.New("endpoint").Funcs(template.FuncMap{
			"trimPrefix": strings.TrimPrefix,
		}).Parse(fqdnTemplate)
	}
	return tmpl, err
}

// NewRouteGroupSource creates a new routeGroupSource with the given config.
func NewRouteGroupSource(timeout time.Duration, token, tokenPath, apiServerURL, namespace, annotationFilter, fqdnTemplate, routegroupVersion string, combineFqdnAnnotation, ignoreHostnameAnnotation bool) (Source, error) {
	tmpl, err := parseTemplate(fqdnTemplate)
@@ -44,6 +44,8 @@ func createTestRouteGroup(ns, name string, annotations map[string]string, hosts
}

func TestEndpointsFromRouteGroups(t *testing.T) {
	t.Parallel()

	for _, tt := range []struct {
		name   string
		source *routeGroupSource
@@ -17,20 +17,24 @@ limitations under the License.
package source

import (
	"bytes"
	"context"
	"fmt"
	"math"
	"net"
	"reflect"
	"strconv"
	"strings"
	"text/template"
	"time"
	"unicode"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"

	"sigs.k8s.io/external-dns/endpoint"
	"sigs.k8s.io/external-dns/internal/config"
)

const (
@@ -106,6 +110,35 @@ func parseTTL(s string) (ttlSeconds int64, err error) {
	return int64(ttlDuration.Seconds()), nil
}

type kubeObject interface {
	runtime.Object
	metav1.Object
}

func execTemplate(tmpl *template.Template, obj kubeObject) (hostnames []string, err error) {
	var buf bytes.Buffer
	if err := tmpl.Execute(&buf, obj); err != nil {
		kind := obj.GetObjectKind().GroupVersionKind().Kind
		return nil, fmt.Errorf("failed to apply template on %s %s/%s: %w", kind, obj.GetNamespace(), obj.GetName(), err)
	}
	for _, name := range strings.Split(buf.String(), ",") {
		name = strings.TrimFunc(name, unicode.IsSpace)
		name = strings.TrimSuffix(name, ".")
		hostnames = append(hostnames, name)
	}
	return hostnames, nil
}

func parseTemplate(fqdnTemplate string) (tmpl *template.Template, err error) {
	if fqdnTemplate == "" {
		return nil, nil
	}
	funcs := template.FuncMap{
		"trimPrefix": strings.TrimPrefix,
	}
	return template.New("endpoint").Funcs(funcs).Parse(fqdnTemplate)
}

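To illustrate the two helpers just added, here is a standalone sketch that mirrors their logic using only the standard library (the template string and data type are illustrative): parseTemplate registers the trimPrefix function and parses the FQDN template, and execTemplate splits the rendered output on commas, trimming surrounding whitespace and a trailing dot from each hostname.

package main

import (
	"bytes"
	"fmt"
	"strings"
	"text/template"
)

func main() {
	// Mirrors parseTemplate: register trimPrefix and parse the FQDN template.
	tmpl, err := template.New("endpoint").Funcs(template.FuncMap{
		"trimPrefix": strings.TrimPrefix,
	}).Parse(`{{trimPrefix .Name "prod-"}}.example.org., static.example.org`)
	if err != nil {
		panic(err)
	}

	// Mirrors execTemplate: execute, then split on commas, trimming
	// spaces and the trailing dot of each entry.
	var buf bytes.Buffer
	if err := tmpl.Execute(&buf, struct{ Name string }{Name: "prod-web"}); err != nil {
		panic(err)
	}
	var hostnames []string
	for _, name := range strings.Split(buf.String(), ",") {
		hostnames = append(hostnames, strings.TrimSuffix(strings.TrimSpace(name), "."))
	}
	fmt.Println(hostnames) // [web.example.org static.example.org]
}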
func getHostnamesFromAnnotations(annotations map[string]string) []string {
	hostnameAnnotation, exists := annotations[hostnameAnnotationKey]
	if !exists {
@@ -253,23 +286,48 @@ func matchLabelSelector(selector labels.Selector, srcAnnotations map[string]stri
	return selector.Matches(annotations)
}

func poll(interval time.Duration, timeout time.Duration, condition wait.ConditionFunc) error {
	if config.FastPoll {
		time.Sleep(5 * time.Millisecond)
type eventHandlerFunc func()

		ok, err := condition()
func (fn eventHandlerFunc) OnAdd(obj interface{})               { fn() }
func (fn eventHandlerFunc) OnUpdate(oldObj, newObj interface{}) { fn() }
func (fn eventHandlerFunc) OnDelete(obj interface{})            { fn() }

		if err != nil {
			return err
		}

		if ok {
			return nil
		}

		interval = 50 * time.Millisecond
		timeout = 10 * time.Second
	}

	return wait.Poll(interval, timeout, condition)
type informerFactory interface {
	WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool
}

func waitForCacheSync(ctx context.Context, factory informerFactory) error {
	ctx, cancel := context.WithTimeout(ctx, 60*time.Second)
	defer cancel()
	for typ, done := range factory.WaitForCacheSync(ctx.Done()) {
		if !done {
			select {
			case <-ctx.Done():
				return fmt.Errorf("failed to sync %v: %v", typ, ctx.Err())
			default:
				return fmt.Errorf("failed to sync %v", typ)
			}
		}
	}
	return nil
}

type dynamicInformerFactory interface {
	WaitForCacheSync(stopCh <-chan struct{}) map[schema.GroupVersionResource]bool
}

func waitForDynamicCacheSync(ctx context.Context, factory dynamicInformerFactory) error {
	ctx, cancel := context.WithTimeout(ctx, 60*time.Second)
	defer cancel()
	for typ, done := range factory.WaitForCacheSync(ctx.Done()) {
		if !done {
			select {
			case <-ctx.Done():
				return fmt.Errorf("failed to sync %v: %v", typ, ctx.Err())
			default:
				return fmt.Errorf("failed to sync %v", typ)
			}
		}
	}
	return nil
}

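For context, a caller starts the shared informer factory and then relies on the helper above instead of hand-rolled polling. A sketch follows, assuming the waitForCacheSync helper above is in scope (same package); newExampleSource and the specific informers are illustrative, not part of the commit:

import (
	"context"

	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
)

func newExampleSource(client kubernetes.Interface) error {
	factory := informers.NewSharedInformerFactory(client, 0)

	// Informers must be requested before Start so the factory knows
	// which caches to build and report in WaitForCacheSync.
	factory.Core().V1().Services().Informer()
	factory.Core().V1().Nodes().Informer()

	factory.Start(wait.NeverStop)

	// Replaces the old poll(... HasSynced ...) pattern in one call.
	return waitForCacheSync(context.Background(), factory)
}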
@@ -29,6 +29,7 @@ import (
	"github.com/pkg/errors"
	log "github.com/sirupsen/logrus"
	istioclient "istio.io/client-go/pkg/clientset/versioned"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
@@ -42,7 +43,7 @@ var ErrSourceNotFound = errors.New("source not found")
type Config struct {
	Namespace                string
	AnnotationFilter         string
	LabelFilter              string
	LabelFilter              labels.Selector
	FQDNTemplate             string
	CombineFQDNAndAnnotation bool
	IgnoreHostnameAnnotation bool
@@ -66,6 +67,7 @@ type Config struct {
	SkipperRouteGroupVersion string
	RequestTimeout           time.Duration
	DefaultTargets           []string
	OCPRouterName            string
}

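With LabelFilter now a parsed labels.Selector instead of a raw string, callers convert the filter value once, up front. A minimal sketch (newConfig and the field values are illustrative, not part of the commit):

func newConfig(filter string) (*Config, error) {
	// labels.Parse("") yields a selector that matches everything,
	// so an empty filter string keeps the old match-all behaviour.
	selector, err := labels.Parse(filter)
	if err != nil {
		return nil, err
	}
	return &Config{
		Namespace:   "default", // illustrative
		LabelFilter: selector,
	}, nil
}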
// ClientGenerator provides clients
@@ -183,13 +185,13 @@ func BuildWithConfig(source string, p ClientGenerator, cfg *Config) (Source, err
		if err != nil {
			return nil, err
		}
		return NewServiceSource(client, cfg.Namespace, cfg.AnnotationFilter, cfg.FQDNTemplate, cfg.CombineFQDNAndAnnotation, cfg.Compatibility, cfg.PublishInternal, cfg.PublishHostIP, cfg.AlwaysPublishNotReadyAddresses, cfg.ServiceTypeFilter, cfg.IgnoreHostnameAnnotation)
		return NewServiceSource(client, cfg.Namespace, cfg.AnnotationFilter, cfg.FQDNTemplate, cfg.CombineFQDNAndAnnotation, cfg.Compatibility, cfg.PublishInternal, cfg.PublishHostIP, cfg.AlwaysPublishNotReadyAddresses, cfg.ServiceTypeFilter, cfg.IgnoreHostnameAnnotation, cfg.LabelFilter)
	case "ingress":
		client, err := p.KubeClient()
		if err != nil {
			return nil, err
		}
		return NewIngressSource(client, cfg.Namespace, cfg.AnnotationFilter, cfg.FQDNTemplate, cfg.CombineFQDNAndAnnotation, cfg.IgnoreHostnameAnnotation, cfg.IgnoreIngressTLSSpec, cfg.IgnoreIngressRulesSpec)
		return NewIngressSource(client, cfg.Namespace, cfg.AnnotationFilter, cfg.FQDNTemplate, cfg.CombineFQDNAndAnnotation, cfg.IgnoreHostnameAnnotation, cfg.IgnoreIngressTLSSpec, cfg.IgnoreIngressRulesSpec, cfg.LabelFilter)
	case "pod":
		client, err := p.KubeClient()
		if err != nil {
@@ -232,16 +234,6 @@ func BuildWithConfig(source string, p ClientGenerator, cfg *Config) (Source, err
			return nil, err
		}
		return NewAmbassadorHostSource(dynamicClient, kubernetesClient, cfg.Namespace)
	case "contour-ingressroute":
		kubernetesClient, err := p.KubeClient()
		if err != nil {
			return nil, err
		}
		dynamicClient, err := p.DynamicKubernetesClient()
		if err != nil {
			return nil, err
		}
		return NewContourIngressRouteSource(dynamicClient, kubernetesClient, cfg.ContourLoadBalancerService, cfg.Namespace, cfg.AnnotationFilter, cfg.FQDNTemplate, cfg.CombineFQDNAndAnnotation, cfg.IgnoreHostnameAnnotation)
	case "contour-httpproxy":
		dynamicClient, err := p.DynamicKubernetesClient()
		if err != nil {
@@ -263,7 +255,7 @@ func BuildWithConfig(source string, p ClientGenerator, cfg *Config) (Source, err
		if err != nil {
			return nil, err
		}
		return NewOcpRouteSource(ocpClient, cfg.Namespace, cfg.AnnotationFilter, cfg.FQDNTemplate, cfg.CombineFQDNAndAnnotation, cfg.IgnoreHostnameAnnotation)
		return NewOcpRouteSource(ocpClient, cfg.Namespace, cfg.AnnotationFilter, cfg.FQDNTemplate, cfg.CombineFQDNAndAnnotation, cfg.IgnoreHostnameAnnotation, cfg.LabelFilter, cfg.OCPRouterName)
	case "fake":
		return NewFakeSource(cfg.FQDNTemplate)
	case "connector":

@@ -25,7 +25,11 @@ import (
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/suite"
	istioclient "istio.io/client-go/pkg/clientset/versioned"
	istiofake "istio.io/client-go/pkg/clientset/versioned/fake"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"
	fakeDynamic "k8s.io/client-go/dynamic/fake"
	"k8s.io/client-go/kubernetes"
	fakeKube "k8s.io/client-go/kubernetes/fake"
)
@@ -89,16 +93,31 @@ type ByNamesTestSuite struct {
}

func (suite *ByNamesTestSuite) TestAllInitialized() {
	fakeDynamic, _ := newDynamicKubernetesClient()

	mockClientGenerator := new(MockClientGenerator)
	mockClientGenerator.On("KubeClient").Return(fakeKube.NewSimpleClientset(), nil)
	mockClientGenerator.On("IstioClient").Return(NewFakeConfigStore(), nil)
	mockClientGenerator.On("DynamicKubernetesClient").Return(fakeDynamic, nil)
	mockClientGenerator.On("IstioClient").Return(istiofake.NewSimpleClientset(), nil)
	mockClientGenerator.On("DynamicKubernetesClient").Return(fakeDynamic.NewSimpleDynamicClientWithCustomListKinds(runtime.NewScheme(),
		map[schema.GroupVersionResource]string{
			{
				Group:    "projectcontour.io",
				Version:  "v1",
				Resource: "httpproxies",
			}: "HTTPPRoxiesList",
			{
				Group:    "contour.heptio.com",
				Version:  "v1beta1",
				Resource: "tcpingresses",
			}: "TCPIngressesList",
			{
				Group:    "configuration.konghq.com",
				Version:  "v1beta1",
				Resource: "tcpingresses",
			}: "TCPIngressesList",
		}), nil)

	sources, err := ByNames(mockClientGenerator, []string{"service", "ingress", "istio-gateway", "contour-ingressroute", "contour-httpproxy", "kong-tcpingress", "fake"}, minimalConfig)
	sources, err := ByNames(mockClientGenerator, []string{"service", "ingress", "istio-gateway", "contour-httpproxy", "kong-tcpingress", "fake"}, minimalConfig)
	suite.NoError(err, "should not generate errors")
	suite.Len(sources, 7, "should generate all six sources")
	suite.Len(sources, 6, "should generate all six sources")
}

func (suite *ByNamesTestSuite) TestOnlyFake() {
@@ -133,9 +152,6 @@ func (suite *ByNamesTestSuite) TestKubeClientFails() {
	_, err = ByNames(mockClientGenerator, []string{"istio-gateway"}, minimalConfig)
	suite.Error(err, "should return an error if kubernetes client cannot be created")

	_, err = ByNames(mockClientGenerator, []string{"contour-ingressroute"}, minimalConfig)
	suite.Error(err, "should return an error if kubernetes client cannot be created")

	_, err = ByNames(mockClientGenerator, []string{"kong-tcpingress"}, minimalConfig)
	suite.Error(err, "should return an error if kubernetes client cannot be created")
}
@@ -149,8 +165,6 @@ func (suite *ByNamesTestSuite) TestIstioClientFails() {
	_, err := ByNames(mockClientGenerator, []string{"istio-gateway"}, minimalConfig)
	suite.Error(err, "should return an error if istio client cannot be created")

	_, err = ByNames(mockClientGenerator, []string{"contour-ingressroute"}, minimalConfig)
	suite.Error(err, "should return an error if contour client cannot be created")
	_, err = ByNames(mockClientGenerator, []string{"contour-httpproxy"}, minimalConfig)
	suite.Error(err, "should return an error if contour client cannot be created")
}
@@ -17,7 +17,6 @@ limitations under the License.
package source

import (
	contour "github.com/projectcontour/contour/apis/contour/v1beta1"
	projectcontour "github.com/projectcontour/contour/apis/projectcontour/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes/scheme"
@@ -36,7 +35,6 @@ func NewUnstructuredConverter() (*UnstructuredConverter, error) {
}

	// Setup converter to understand custom CRD types
	_ = contour.AddToScheme(uc.scheme)
	_ = projectcontour.AddToScheme(uc.scheme)

	// Add the core types we need