Merge remote-tracking branch 'origing/master' into dansimone/support-wildcard-records

# Conflicts:
#	registry/txt.go
#	registry/txt_test.go
dan.simone@oracle.com 2020-09-01 11:24:44 -07:00
commit e3bac0fb55
78 changed files with 4275 additions and 302 deletions

.github/labeler.yml

@ -64,3 +64,6 @@ provider/vinyldns: provider/vinyldns*
# Add 'provider/vultr' in file which starts with vultr
provider/vultr: provider/vultr*
# Add 'provider/ultradns' in file which starts with ultradns
provider/ultradns: provider/ultradns*


@ -1,3 +1,17 @@
## Checklist
<!--
Please read https://github.com/kubernetes-sigs/external-dns#contributing before submitting
your pull request. Please fill in each section below to help us better prioritize your pull request. Thanks!
-->
- [ ] Update changelog in CHANGELOG.md, use section "Unreleased".
**Description**
<!-- Please provide a summary of the change here. -->
<!-- Please link to all GitHub issues that this pull request implements (i.e. Fixes #123) -->
Fixes #ISSUE
**Checklist**
- [ ] Unit tests updated
- [ ] End user documentation updated
- [ ] CHANGELOG.md updated, use section "Unreleased"


@ -32,13 +32,13 @@ jobs:
- name: Lint
run: |
curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.26.0
curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.30.0
make lint
- name: Coverage
uses: shogo82148/actions-goveralls@v1
- name: Test
run: make test
- name: Send coverage
uses: shogo82148/actions-goveralls@v1
with:
path-to-profile: profile.cov


@ -2,7 +2,7 @@ linters-settings:
exhaustive:
default-signifies-exhaustive: false
goimports:
local-prefixes: github.com/kubernetes-sigs/external-dns
local-prefixes: sigs.k8s.io/external-dns
golint:
min-confidence: 0.9
maligned:


@ -1,7 +1,49 @@
## Unreleased
- Enhance pull request template @seanmalloy
- Improve errors context for AWS provider
- Scaleway Provider (#1643) @Sh4d1
- Enable azure_private_dns to work with non "AzurePublicCloud" clouds (#1578) @daddonpa
- Fix typos in documentation @ddymko
- Add Cloudflare documentation on use of `--zone-id-filter` (#1751) @loozhengyuan
- Fix: alibaba cloud keeping create record (#1682) @LXM
- Update all container registry references to use k8s.gcr.io @seanmalloy
- Provide available prometheus metrics in documentation @vinny-sabatini
- Fix index out of range when hostname has no dots (#1756) @chemasan
- Fixes test coverage with coveralls (#1755) @jgrumboe
- Add tutorial for GKE with workload identity (#1765) @ddgenome
## v0.7.3 - 2020-08-05
- Fix: add serviceaccount name in kustomize deployment (#1689) @jmthvt
- Updates Oracle OCI SDK to latest (#1687) @ericrrath
- UltraDNS Provider (#1635) @kbhandari
- Update apiVersions in docs (#1690) @ddgenome
- use the github actions build status badge (#1702) @tariq1890
- Upgrade Oracle OCI SDK (#1688) @ericrrath
- update dependencies and minor dep tree cleanup (#1692) @tariq1890
- Update link for linode cloud manager (#1661) @phillc
- Remove occurrences of "master" from the project (#1636) @Raffo
- Create pull_request_template (#1662) @njuettner
- dependencies: Upgrade all k8s client-go dependent sources to v1.18.X (#1627) @josephglanville
- add GitHub Actions (#1657) @Raffo
- add new source for istio virtual services (#1607) @tariq1890
- use latest Alpine version in ExternalDNS dockerfile (#1655) @tariq1890
- Update TTL docs to confirm DNSimple support (#1547) @weppos
- rm unused flag param istio-ingressgateways (#1649) @tariq1890
- Upgrade istio httpbin from 1.0 to 1.6 version (#1640) @ikovnatskymiacar
- Add endpoints to kustomize base (#1638) @Raffo
- DigitalOcean: support multiple targets per endpoint (#1595) @tdyas
- Vultr : Version bump + changes (#1637) @ddymko
- Hetzner DNS service support (#1570) @21h
- Add OVH API rate limiting option (Fix #1546) (#1619) @Hugome
- Add kustomize base (#1631) @Raffo
- increase test timeout to fix intermittent failures of ingress tests (#1612) @tdyas
- AWS: change the order of the actions, DELETE before CREATE fixes #1411 (#1555) @OmerKahani
- Fix handling of DNS updates for RFC2136 provider. (#1613) @dmayle
- digitalocean: increase API page size (#1611) @tdyas
- improve linter quality for external-dns (#1618) @njuettner
- fix convert int to string bug (#1620) @tariq1890
## v0.7.2 - 2020-06-03


@ -27,18 +27,36 @@ cover:
cover-html: cover
go tool cover -html cover.out
.PHONY: go-lint
# Run the golangci-lint tool
go-lint:
golangci-lint run --timeout=15m ./...
.PHONY: licensecheck
# Run the licensecheck script to check for license headers
licensecheck:
@echo ">> checking license header"
@licRes=$$(for file in $$(find . -type f -iname '*.go' ! -path './vendor/*') ; do \
awk 'NR<=5' $$file | grep -Eq "(Copyright|generated|GENERATED)" || echo $$file; \
done); \
if [ -n "$${licRes}" ]; then \
echo "license header checking failed:"; echo "$${licRes}"; \
exit 1; \
fi
.PHONY: lint
# Run all the linters
lint:
golangci-lint run --timeout=15m ./...
lint: licensecheck go-lint
# The verify target runs tasks similar to the CI tasks, but without code coverage
.PHONY: verify test
test:
go test -race ./...
go test -race -coverprofile=profile.cov ./...
# The build targets allow to build the binary and docker image
.PHONY: build build.docker build.mini

OWNERS

@ -2,7 +2,9 @@
# https://github.com/kubernetes/community/blob/HEAD/contributors/guide/owners.md
approvers:
- hjacobs
- raffo
- linki
- njuettner
emeritus_approvers:
- hjacobs
- linki


@ -3,7 +3,7 @@
</p>
# ExternalDNS
[![Build Status](https://travis-ci.org/kubernetes-sigs/external-dns.svg)](https://travis-ci.org/kubernetes-sigs/external-dns)
[![Build Status](https://github.com/kubernetes-sigs/external-dns/workflows/Go/badge.svg)](https://github.com/kubernetes-sigs/external-dns/actions)
[![Coverage Status](https://coveralls.io/repos/github/kubernetes-sigs/external-dns/badge.svg)](https://coveralls.io/github/kubernetes-sigs/external-dns)
[![GitHub release](https://img.shields.io/github/release/kubernetes-sigs/external-dns.svg)](https://github.com/kubernetes-sigs/external-dns/releases)
[![go-doc](https://godoc.org/github.com/kubernetes-sigs/external-dns?status.svg)](https://godoc.org/github.com/kubernetes-sigs/external-dns)
@ -46,6 +46,7 @@ ExternalDNS' current release is `v0.7`. This version allows you to keep selected
* [TransIP](https://www.transip.eu/domain-name/)
* [VinylDNS](https://www.vinyldns.io)
* [OVH](https://www.ovh.com)
* [Scaleway](https://www.scaleway.com)
From this release, ExternalDNS can become aware of the records it is managing (enabled via `--registry=txt`), therefore ExternalDNS can safely manage non-empty hosted zones. We strongly encourage you to use `v0.5` (or greater) with `--registry=txt` enabled and `--txt-owner-id` set to a unique value that doesn't change for the lifetime of your cluster. You might also want to run ExternalDNS in a dry run mode (`--dry-run` flag) to see the changes to be submitted to your DNS Provider API.
@ -71,7 +72,7 @@ We define the following stability levels for providers:
The following table clarifies the current status of the providers according to the aforementioned stability levels:
| Provider | Status | Maintainers |
| Provider | Status | Maintainers |
| -------- | ------ | ----------- |
| Google Cloud DNS | Stable | |
| AWS Route 53 | Stable | |
@ -97,7 +98,9 @@ The following table clarifies the current status of the providers according to t
| RancherDNS | Alpha | |
| Akamai FastDNS | Alpha | |
| OVH | Alpha | |
| Scaleway DNS | Alpha | @Sh4d1 |
| Vultr | Alpha | |
| UltraDNS | Alpha | |
## Running ExternalDNS:
@ -146,7 +149,9 @@ The following tutorials are provided:
* [TransIP](docs/tutorials/transip.md)
* [VinylDNS](docs/tutorials/vinyldns.md)
* [OVH](docs/tutorials/ovh.md)
* [Scaleway](docs/tutorials/scaleway.md)
* [Vultr](docs/tutorials/vultr.md)
* [UltraDNS](docs/tutorials/ultradns.md)
### Running Locally
@ -263,7 +268,7 @@ Here's a rough outline on what is to come (subject to change):
- [x] Support for DigitalOcean
- [x] Multiple DNS names per Service
### v0.5 - _current version_
### v0.5
- [x] Support for creating DNS records to multiple targets (for Google and AWS)
- [x] Support for OpenStack Designate
@ -327,7 +332,9 @@ ExternalDNS is an effort to unify the following similar projects in order to bri
* Molecule Software's [route53-kubernetes](https://github.com/wearemolecule/route53-kubernetes)
### User Demo How-To Blogs and Examples
* A full demo on GKE Kubernetes. See [How-to Kubernetes with DNS management (ssl-manager pre-req)](https://medium.com/@jpantjsoha/how-to-kubernetes-with-dns-management-for-gitops-31239ea75d8d)
* Run external-dns on GKE with workload identity. See [Kubernetes, ingress-nginx, cert-manager & external-dns](https://blog.atomist.com/kubernetes-ingress-nginx-cert-manager-external-dns/)
### Code of conduct


@ -189,6 +189,16 @@ In case of an increased error count, you could correlate them with the `http_req
You can use the host label in the metric to figure out if the request was against the Kubernetes API server (Source errors) or the DNS provider API (Registry/Provider errors).
Here is the full list of available metrics provided by ExternalDNS:
| Name | Description | Type |
|-----------------------------------------------------|---------------------------------------------------------|---------|
| external_dns_controller_last_sync_timestamp_seconds | Timestamp of last successful sync with the DNS provider | Gauge |
| external_dns_registry_endpoints_total | Number of Endpoints in all sources | Gauge |
| external_dns_registry_errors_total | Number of Registry errors | Counter |
| external_dns_source_endpoints_total | Number of Endpoints in the registry | Gauge |
| external_dns_source_errors_total | Number of Source errors | Counter |
### How can I run ExternalDNS under a specific GCP Service Account, e.g. to access DNS records in other projects?
Have a look at https://github.com/linki/mate/blob/v0.6.2/examples/google/README.md#permissions
@ -204,10 +214,11 @@ $ docker run \
-e EXTERNAL_DNS_SOURCE=$'service\ningress' \
-e EXTERNAL_DNS_PROVIDER=google \
-e EXTERNAL_DNS_DOMAIN_FILTER=$'foo.com\nbar.com' \
registry.opensource.zalan.do/teapot/external-dns:latest
k8s.gcr.io/external-dns/external-dns:v0.7.3
time="2017-08-08T14:10:26Z" level=info msg="config: &{APIServerURL: KubeConfig: Sources:[service ingress] Namespace: ...
```
Locally:
```console
@ -261,6 +272,9 @@ an instance of a ingress controller. Let's assume you have two ingress controlle
then you can start two ExternalDNS providers one with `--annotation-filter=kubernetes.io/ingress.class=nginx-internal`
and one with `--annotation-filter=kubernetes.io/ingress.class=nginx-external`.
Beware when using multiple sources, e.g. `--source=service --source=ingress`: `--annotation-filter` is applied to the objects of every given source.
If you need to filter only one specific source, you have to run a separate external-dns instance containing only the wanted `--source` and `--annotation-filter`.
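For illustration, here is a minimal sketch of two such instances; only the `args` fragments of each Deployment's container spec are shown, and the provider, owner IDs and ingress class values are placeholders:
```yaml
# Sketch: two separate external-dns instances, each with its own source and filter.
# Instance 1: only ingresses of class nginx-internal (fragment of the container spec)
args:
  - --source=ingress
  - --annotation-filter=kubernetes.io/ingress.class=nginx-internal
  - --provider=aws              # placeholder: use your provider
  - --registry=txt
  - --txt-owner-id=internal     # placeholder: must be unique per instance
---
# Instance 2: only services, no annotation filter (fragment of the container spec)
args:
  - --source=service
  - --provider=aws
  - --registry=txt
  - --txt-owner-id=services     # placeholder
```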
### Can external-dns manage(add/remove) records in a hosted zone which is setup in different AWS account?
Yes, give it the correct cross-account/assume-role permissions and use the `--aws-assume-role` flag https://github.com/kubernetes-sigs/external-dns/pull/524#issue-181256561
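As a sketch (the role ARN below is a placeholder; see the linked PR for the exact expected value), the flag is simply passed as an additional container argument:
```yaml
# Fragment of the external-dns container spec; the role ARN is a placeholder.
args:
  - --source=service
  - --source=ingress
  - --provider=aws
  - --aws-assume-role=arn:aws:iam::123456789012:role/external-dns-cross-account
```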
@ -272,17 +286,16 @@ Separate them by `,`.
### Are there official Docker images provided?
When we tag a new release, we push a Docker image on Zalando's public Docker registry with the following name:
When we tag a new release, we push a container image to the Kubernetes project's official container registry with the following name:
```
registry.opensource.zalan.do/teapot/external-dns
k8s.gcr.io/external-dns/external-dns
```
As tags, you can use your version of choice or use `latest` that always resolves to the latest tag.
As tags, you use the external-dns release of choice (e.g. `v0.7.3`). A `latest` tag is not provided in the container registry.
If you wish to build your own image, you can use the provided [Dockerfile](../Dockerfile) as a starting point.
We are currently working with the Kubernetes community to provide official images for the project similarly to what is done with the other official Kubernetes projects, but we don't have an ETA on when those images will be available.
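Relatedly, if you deploy via the project's kustomize base, one way to pin a specific release tag is an image override in your own `kustomization.yaml`; a sketch, where the remote base path is an assumption:
```yaml
# kustomization.yaml (sketch)
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - github.com/kubernetes-sigs/external-dns/kustomize   # assumption: location of the kustomize base
images:
  - name: k8s.gcr.io/external-dns/external-dns
    newTag: v0.7.3
```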
### Why am I seeing time out errors even though I have connectivity to my cluster?


@ -6,6 +6,6 @@ Currently we don't release regularly. Whenever we think it makes sense to releas
## How to release a new image
When releasing a new version of external-dns, we tag the branch by using **vX.Y.Z** as tag name. This PR includes the updated **CHANGELOG.md** with the latest commits since last tag. As soon as we merge this PR into the default branch, Kubernetes based CI/CD system [Prow](https://prow.k8s.io/?repo=kubernetes-sigs%2Fexternal-dns) will trigger a job to push the image. We're using the Google Container Registry for our Docker images.
When releasing a new version of external-dns, we tag the branch by using **vX.Y.Z** as the tag name. To prepare the release, a PR is created to update the **CHANGELOG.md** with the latest commits since the last tag, as well as the [kustomization configuration](../kustomization/external-dns-deployment.yaml) to use the new tag. As soon as the PR is merged into the default branch, the Kubernetes-based CI/CD system [Prow](https://prow.k8s.io/?repo=kubernetes-sigs%2Fexternal-dns) will trigger a job to push the image. We're using the Google Container Registry for our Docker images.
The job itself looks at the external-dns `cloudbuild.yaml` and executes the given steps. Inside, it runs `make release.staging`, which is basically only a `docker build` and `docker push`. The docker image is pushed to `gcr.io/k8s-staging-external-dns/external-dns`, which is only a staging image and shouldn't be used. To promote the official image, we need to create another PR in [k8s.io](https://github.com/kubernetes/k8s.io), e.g. https://github.com/kubernetes/k8s.io/pull/540, by taking the current staging image using its sha256 digest.


@ -46,6 +46,7 @@ Providers
- [x] TransIP
- [x] RFC2136
- [x] Vultr
- [x] UltraDNS
PRs welcome!
@ -80,3 +81,6 @@ The TransIP Provider minimal TTL is used when the TTL is 0. The minimal TTL is 6
### Vultr Provider
The Vultr provider minimal TTL is used when the TTL is 0. The default is 1 hour.
### UltraDNS
The UltraDNS provider minimal TTL is used when no TTL is provided. The default TTL is the account-level default TTL, if defined, otherwise 24 hours.
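For reference, the desired TTL is requested per record through the `external-dns.alpha.kubernetes.io/ttl` annotation; a minimal sketch on a Service (hostname and value are examples):
```yaml
apiVersion: v1
kind: Service
metadata:
  name: nginx
  annotations:
    external-dns.alpha.kubernetes.io/hostname: nginx.example.com.
    external-dns.alpha.kubernetes.io/ttl: "120"  # seconds; omit to fall back to the provider's minimal/default TTL
spec:
  type: LoadBalancer
  selector:
    app: nginx
  ports:
    - port: 80
      targetPort: 80
```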


@ -7,7 +7,7 @@ Akamai FastDNS provider support was added via [this PR](https://github.com/kuber
The Akamai FastDNS provider expects that the zones you wish to add records to already exist
and are configured correctly. It does not add, remove or configure new zones in any way.
To do this pease refer to the [FastDNS documentation](https://learn.akamai.com/en-us/products/web_performance/fast_dns.html).
To do this please refer to the [FastDNS documentation](https://learn.akamai.com/en-us/products/web_performance/fast_dns.html).
Additional data you will have to provide:
@ -18,7 +18,7 @@ Additional data you will have to provide:
Make these available to external DNS somehow. In the following example a secret is used by referencing the secret and its keys in the env section of the deployment.
If you happen to have questions regarding authentification, please refer to the [API Client Authentication documentation](https://developer.akamai.com/legacy/introduction/Client_Auth.html)
If you happen to have questions regarding authentication, please refer to the [API Client Authentication documentation](https://developer.akamai.com/legacy/introduction/Client_Auth.html)
## Deployment
@ -49,7 +49,7 @@ spec:
# serviceAccountName: external-dns
containers:
- name: external-dns
image: eu.gcr.io/k8s-artifacts-prod/external-dns/external-dns:v0.6.0
image: k8s.gcr.io/external-dns/external-dns:v0.7.3
args:
- --source=ingress # or service or both
- --provider=akamai
@ -97,7 +97,7 @@ rules:
- apiGroups: [""]
resources: ["services","endpoints","pods"]
verbs: ["get","watch","list"]
- apiGroups: ["extensions"]
- apiGroups: ["extensions","networking.k8s.io"]
resources: ["ingresses"]
verbs: ["get","watch","list"]
- apiGroups: [""]


@ -110,7 +110,7 @@ this Ingress object will only be fronting one backend Service, we might instead
create the following:
```yaml
apiVersion: extensions/v1beta1
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
annotations:
@ -145,7 +145,7 @@ and one AAAA record) for each hostname associated with the Ingress object.
Example:
```yaml
apiVersion: extensions/v1beta1
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
annotations:


@ -113,7 +113,7 @@ spec:
spec:
containers:
- name: external-dns
image: registry.opensource.zalan.do/teapot/external-dns:latest
image: k8s.gcr.io/external-dns/external-dns:v0.7.3
args:
- --source=service
- --source=ingress
@ -149,7 +149,7 @@ rules:
- apiGroups: [""]
resources: ["services","endpoints","pods"]
verbs: ["get","watch","list"]
- apiGroups: ["extensions"]
- apiGroups: ["extensions","networking.k8s.io"]
resources: ["ingresses"]
verbs: ["get","watch","list"]
- apiGroups: [""]
@ -187,7 +187,7 @@ spec:
serviceAccountName: external-dns
containers:
- name: external-dns
image: registry.opensource.zalan.do/teapot/external-dns:latest
image: k8s.gcr.io/external-dns/external-dns:v0.7.3
args:
- --source=service
- --source=ingress


@ -81,7 +81,7 @@ spec:
spec:
containers:
- name: external-dns
image: registry.opensource.zalan.do/teapot/external-dns:latest
image: k8s.gcr.io/external-dns/external-dns:v0.7.3
env:
- name: AWS_REGION
value: us-east-1 # put your CloudMap NameSpace region
@ -110,7 +110,7 @@ rules:
- apiGroups: [""]
resources: ["services","endpoints","pods"]
verbs: ["get","watch","list"]
- apiGroups: ["extensions"]
- apiGroups: ["extensions","networking.k8s.io"]
resources: ["ingresses"]
verbs: ["get","watch","list"]
- apiGroups: [""]
@ -148,7 +148,7 @@ spec:
serviceAccountName: external-dns
containers:
- name: external-dns
image: registry.opensource.zalan.do/teapot/external-dns:latest
image: k8s.gcr.io/external-dns/external-dns:v0.7.3
env:
- name: AWS_REGION
value: us-east-1 # put your CloudMap NameSpace region


@ -141,7 +141,7 @@ spec:
spec:
containers:
- name: external-dns
image: registry.opensource.zalan.do/teapot/external-dns:latest
image: k8s.gcr.io/external-dns/external-dns:v0.7.3
args:
- --source=service
- --source=ingress
@ -174,7 +174,7 @@ rules:
- apiGroups: [""]
resources: ["services","endpoints","pods"]
verbs: ["get","watch","list"]
- apiGroups: ["extensions"]
- apiGroups: ["extensions","networking.k8s.io"]
resources: ["ingresses"]
verbs: ["get","watch","list"]
- apiGroups: [""]
@ -216,7 +216,7 @@ spec:
serviceAccountName: external-dns
containers:
- name: external-dns
image: registry.opensource.zalan.do/teapot/external-dns:latest
image: k8s.gcr.io/external-dns/external-dns:v0.7.3
args:
- --source=service
- --source=ingress


@ -150,11 +150,14 @@ The credentials of the service principal are provided to ExternalDNS as environm
### Manifest (for clusters without RBAC enabled)
```yaml
apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: Deployment
metadata:
name: externaldns
spec:
selector:
matchLabels:
app: externaldns
strategy:
type: Recreate
template:
@ -164,7 +167,7 @@ spec:
spec:
containers:
- name: externaldns
image: registry.opensource.zalan.do/teapot/external-dns:latest
image: k8s.gcr.io/external-dns/external-dns:v0.7.3
args:
- --source=service
- --source=ingress
@ -196,7 +199,7 @@ rules:
- apiGroups: [""]
resources: ["services","endpoints","pods"]
verbs: ["get","watch","list"]
- apiGroups: ["extensions"]
- apiGroups: ["extensions","networking.k8s.io"]
resources: ["ingresses"]
verbs: ["get","watch","list"]
- apiGroups: [""]
@ -216,11 +219,14 @@ subjects:
name: externaldns
namespace: default
---
apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: Deployment
metadata:
name: externaldns
spec:
selector:
matchLabels:
app: externaldns
strategy:
type: Recreate
template:
@ -231,7 +237,7 @@ spec:
serviceAccountName: externaldns
containers:
- name: externaldns
image: registry.opensource.zalan.do/teapot/external-dns:latest
image: k8s.gcr.io/external-dns/external-dns:v0.7.3
args:
- --source=service
- --source=ingress
@ -267,7 +273,7 @@ rules:
- apiGroups: [""]
resources: ["services","endpoints","pods"]
verbs: ["get","watch","list"]
- apiGroups: ["extensions"]
- apiGroups: ["extensions","networking.k8s.io"]
resources: ["ingresses"]
verbs: ["get","watch","list"]
---
@ -283,11 +289,14 @@ subjects:
- kind: ServiceAccount
name: externaldns
---
apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: Deployment
metadata:
name: externaldns
spec:
selector:
matchLabels:
app: externaldns
strategy:
type: Recreate
template:
@ -298,7 +307,7 @@ spec:
serviceAccountName: externaldns
containers:
- name: externaldns
image: registry.opensource.zalan.do/teapot/external-dns:latest
image: k8s.gcr.io/external-dns/external-dns:v0.7.3
args:
- --source=service
- --source=ingress
@ -326,11 +335,14 @@ $ kubectl create -f externaldns.yaml
Create a service file called 'nginx.yaml' with the following contents:
```yaml
apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx
spec:
selector:
matchLabels:
app: nginx
template:
metadata:
labels:
@ -356,7 +368,7 @@ spec:
type: ClusterIP
---
apiVersion: extensions/v1beta1
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: nginx


@ -58,7 +58,7 @@ You can find the `subscriptionId` by running `az account show --query "id"` or b
The `resourceGroup` is the Resource Group created in a previous step.
The `aadClientID` and `aaClientSecret` are assoiated with the Service Principal, that you need to create next.
The `aadClientID` and `aaClientSecret` are associated with the Service Principal, that you need to create next.
### Creating service principal
A Service Principal with a minimum access level of `contributor` to the DNS zone(s) and `reader` to the resource group containing the Azure DNS zone(s) is necessary for ExternalDNS to be able to edit DNS records. However, other more permissive access levels will work too (e.g. `contributor` to the resource group or the whole subscription).
@ -191,7 +191,7 @@ spec:
spec:
containers:
- name: external-dns
image: registry.opensource.zalan.do/teapot/external-dns:latest
image: k8s.gcr.io/external-dns/external-dns:v0.7.3
args:
- --source=service
- --source=ingress
@ -223,7 +223,7 @@ rules:
- apiGroups: [""]
resources: ["services","endpoints","pods"]
verbs: ["get","watch","list"]
- apiGroups: ["extensions"]
- apiGroups: ["extensions","networking.k8s.io"]
resources: ["ingresses"]
verbs: ["get","watch","list"]
- apiGroups: [""]
@ -261,7 +261,7 @@ spec:
serviceAccountName: external-dns
containers:
- name: external-dns
image: registry.opensource.zalan.do/teapot/external-dns:latest
image: k8s.gcr.io/external-dns/external-dns:v0.7.3
args:
- --source=service
- --source=ingress
@ -297,7 +297,7 @@ rules:
- apiGroups: [""]
resources: ["services","endpoints","pods"]
verbs: ["get","watch","list"]
- apiGroups: ["extensions"]
- apiGroups: ["extensions","networking.k8s.io"]
resources: ["ingresses"]
verbs: ["get","watch","list"]
---
@ -331,7 +331,7 @@ spec:
serviceAccountName: external-dns
containers:
- name: external-dns
image: registry.opensource.zalan.do/teapot/external-dns:latest
image: k8s.gcr.io/external-dns/external-dns:v0.7.3
args:
- --source=service
- --source=ingress


@ -21,7 +21,9 @@ Snippet from [Cloudflare - Getting Started](https://api.cloudflare.com/#getting-
API Token will be preferred for authentication if `CF_API_TOKEN` environment variable is set.
Otherwise `CF_API_KEY` and `CF_API_EMAIL` should be set to run ExternalDNS with Cloudflare.
When using API Token authentication the token should be granted Zone `Read` and DNS `Edit` privileges.
When using API Token authentication, the token should be granted Zone `Read`, DNS `Edit` privileges, and access to `All zones`.
If you would like to further restrict the API permissions to a specific zone (or zones), you also need to use the `--zone-id-filter` so that the underlying API requests only access the zones that you explicitly specify, as opposed to accessing all zones.
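These credentials are usually supplied to the Deployment as environment variables; a minimal sketch using a Kubernetes Secret (the secret and key names are hypothetical):
```yaml
# Sketch: store the API token in a Secret ...
apiVersion: v1
kind: Secret
metadata:
  name: cloudflare-api-token    # hypothetical name
type: Opaque
stringData:
  apiToken: "<your Cloudflare API token>"
---
# ... and reference it from the external-dns container spec (fragment):
env:
  - name: CF_API_TOKEN
    valueFrom:
      secretKeyRef:
        name: cloudflare-api-token
        key: apiToken
```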
## Deploy ExternalDNS
@ -48,10 +50,11 @@ spec:
spec:
containers:
- name: external-dns
image: registry.opensource.zalan.do/teapot/external-dns:latest
image: k8s.gcr.io/external-dns/external-dns:v0.7.3
args:
- --source=service # ingress is also possible
- --domain-filter=example.com # (optional) limit to only example.com domains; change to match the zone created above.
- --zone-id-filter=023e105f4ecef8ad9ca31a8372d0c353 # (optional) limit to a specific zone.
- --provider=cloudflare
- --cloudflare-proxied # (optional) enable the proxy feature of Cloudflare (DDOS protection, CDN...)
env:
@ -77,7 +80,7 @@ rules:
- apiGroups: [""]
resources: ["services","endpoints","pods"]
verbs: ["get","watch","list"]
- apiGroups: ["extensions"]
- apiGroups: ["extensions","networking.k8s.io"]
resources: ["ingresses"]
verbs: ["get","watch","list"]
- apiGroups: [""]
@ -115,10 +118,11 @@ spec:
serviceAccountName: external-dns
containers:
- name: external-dns
image: registry.opensource.zalan.do/teapot/external-dns:latest
image: k8s.gcr.io/external-dns/external-dns:v0.7.3
args:
- --source=service # ingress is also possible
- --domain-filter=example.com # (optional) limit to only example.com domains; change to match the zone created above.
- --zone-id-filter=023e105f4ecef8ad9ca31a8372d0c353 # (optional) limit to a specific zone.
- --provider=cloudflare
- --cloudflare-proxied # (optional) enable the proxy feature of Cloudflare (DDOS protection, CDN...)
env:


@ -21,7 +21,7 @@ spec:
spec:
containers:
- name: external-dns
image: registry.opensource.zalan.do/teapot/external-dns:latest
image: k8s.gcr.io/external-dns/external-dns:v0.7.3
args:
- --source=service
- --source=ingress
@ -50,7 +50,7 @@ rules:
- apiGroups: [""]
resources: ["services","endpoints","pods"]
verbs: ["get","watch","list"]
- apiGroups: ["extensions"]
- apiGroups: ["extensions","networking.k8s.io"]
resources: ["ingresses"]
verbs: ["get","watch","list"]
- apiGroups: [""]
@ -91,7 +91,7 @@ spec:
serviceAccountName: external-dns
containers:
- name: external-dns
image: registry.opensource.zalan.do/teapot/external-dns:latest
image: k8s.gcr.io/external-dns/external-dns:v0.7.3
args:
- --source=service
- --source=ingress


@ -108,7 +108,7 @@ spec:
spec:
containers:
- name: external-dns
image: registry.opensource.zalan.do/teapot/external-dns:latest
image: k8s.gcr.io/external-dns/external-dns:v0.7.3
args:
- --source=ingress
- --provider=coredns
@ -130,7 +130,7 @@ rules:
- apiGroups: [""]
resources: ["services","endpoints","pods"]
verbs: ["get","watch","list"]
- apiGroups: ["extensions"]
- apiGroups: ["extensions","networking.k8s.io"]
resources: ["ingresses"]
verbs: ["get","watch","list"]
- apiGroups: [""]
@ -175,7 +175,7 @@ spec:
serviceAccountName: external-dns
containers:
- name: external-dns
image: registry.opensource.zalan.do/teapot/external-dns:latest
image: k8s.gcr.io/external-dns/external-dns:v0.7.3
args:
- --source=ingress
- --provider=coredns


@ -59,7 +59,7 @@ spec:
spec:
containers:
- name: external-dns
image: registry.opensource.zalan.do/teapot/external-dns:latest
image: k8s.gcr.io/external-dns/external-dns:v0.7.3
args:
- --source=service # ingress is also possible
- --domain-filter=example.com # (optional) limit to only example.com domains; change to match the zone created above.
@ -98,7 +98,7 @@ rules:
- apiGroups: [""]
resources: ["pods"]
verbs: ["get","watch","list"]
- apiGroups: ["extensions"]
- apiGroups: ["extensions","networking.k8s.io"]
resources: ["ingresses"]
verbs: ["get","watch","list"]
- apiGroups: [""]
@ -118,11 +118,14 @@ subjects:
name: external-dns
namespace: default
---
apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: Deployment
metadata:
name: external-dns
spec:
selector:
matchLabels:
app: external-dns
strategy:
type: Recreate
template:
@ -130,9 +133,10 @@ spec:
labels:
app: external-dns
spec:
serviceAccountName: external-dns
containers:
- name: external-dns
image: registry.opensource.zalan.do/teapot/external-dns:latest
image: k8s.gcr.io/external-dns/external-dns:v0.7.3
args:
- --source=service # ingress is also possible
- --domain-filter=example.com # (optional) limit to only example.com domains; change to match the zone created above.


@ -43,7 +43,7 @@ spec:
spec:
containers:
- name: external-dns
image: registry.opensource.zalan.do/teapot/external-dns:latest
image: k8s.gcr.io/external-dns/external-dns:v0.7.3
args:
- --source=service # ingress is also possible
- --domain-filter=example.com # (optional) limit to only example.com domains; change to match the zone created above.
@ -68,7 +68,7 @@ rules:
- apiGroups: [""]
resources: ["services","endpoints","pods"]
verbs: ["get","watch","list"]
- apiGroups: ["extensions"]
- apiGroups: ["extensions","networking.k8s.io"]
resources: ["ingresses"]
verbs: ["get","watch","list"]
- apiGroups: [""]
@ -107,7 +107,7 @@ spec:
serviceAccountName: external-dns
containers:
- name: external-dns
image: registry.opensource.zalan.do/teapot/external-dns:latest
image: k8s.gcr.io/external-dns/external-dns:v0.7.3
args:
- --source=service # ingress is also possible
- --domain-filter=example.com # (optional) limit to only example.com domains; change to match the zone created above.


@ -35,7 +35,7 @@ spec:
spec:
containers:
- name: external-dns
image: registry.opensource.zalan.do/teapot/external-dns:latest
image: k8s.gcr.io/external-dns/external-dns:v0.7.3
args:
- --source=service
- --domain-filter=example.com # (optional) limit to only example.com domains; change to match the zone you create in DNSimple.
@ -62,7 +62,7 @@ rules:
- apiGroups: [""]
resources: ["services","endpoints","pods"]
verbs: ["get","watch","list"]
- apiGroups: ["extensions"]
- apiGroups: ["extensions","networking.k8s.io"]
resources: ["ingresses"]
verbs: ["get","watch","list"]
- apiGroups: [""]
@ -100,7 +100,7 @@ spec:
serviceAccountName: external-dns
containers:
- name: external-dns
image: registry.opensource.zalan.do/teapot/external-dns:latest
image: k8s.gcr.io/external-dns/external-dns:v0.7.3
args:
- --source=service
- --domain-filter=example.com # (optional) limit to only example.com domains; change to match the zone you create in DNSimple.
@ -173,7 +173,7 @@ the DNSimple DNS records.
### Getting your DNSimple Account ID
If you do not know your DNSimple account ID it can be aquired using the [whoami](https://developer.dnsimple.com/v2/identity/#whoami) endpoint from the DNSimple Identity API
If you do not know your DNSimple account ID it can be acquired using the [whoami](https://developer.dnsimple.com/v2/identity/#whoami) endpoint from the DNSimple Identity API
```sh
curl -H "Authorization: Bearer $DNSIMPLE_ACCOUNT_TOKEN" \


@ -43,7 +43,7 @@ spec:
spec:
containers:
- name: external-dns
image: registry.opensource.zalan.do/teapot/external-dns:latest
image: k8s.gcr.io/external-dns/external-dns:v0.7.3
args:
- --source=ingress
- --txt-prefix=_d
@ -130,7 +130,7 @@ spec:
As the DNS name `test-ingress.example.com` matches the filter, external-dns will create two records:
a CNAME for test-ingress.example.com and TXT for _dtest-ingress.example.com.
Create the Igress:
Create the Ingress:
```
$ kubectl create -f test-ingress.yaml


@ -7,7 +7,7 @@ Exoscale provider support was added via [this PR](https://github.com/kubernetes-
The Exoscale provider expects that the Exoscale zones you wish to add records to already exist
and are configured correctly. It does not add, remove or configure new zones in any way.
To do this pease refer to the [Exoscale DNS documentation](https://community.exoscale.com/documentation/dns/).
To do this please refer to the [Exoscale DNS documentation](https://community.exoscale.com/documentation/dns/).
Additionally you will have to provide the Exoscale...:
@ -41,7 +41,7 @@ spec:
# serviceAccountName: external-dns
containers:
- name: external-dns
image: eu.gcr.io/k8s-artifacts-prod/external-dns/external-dns:v0.6.0
image: k8s.gcr.io/external-dns/external-dns:v0.7.3
args:
- --source=ingress # or service or both
- --provider=exoscale
@ -74,7 +74,7 @@ rules:
- apiGroups: [""]
resources: ["services","endpoints","pods"]
verbs: ["get","watch","list"]
- apiGroups: ["extensions"]
- apiGroups: ["extensions","networking.k8s.io"]
resources: ["ingresses"]
verbs: ["get","watch","list"]
- apiGroups: [""]


@ -2,7 +2,7 @@
This tutorial describes how to setup ExternalDNS for usage in conjunction with an ExternalName service.
## Usecases
## Use cases
The main use case that inspired this feature is the need to have a subdomain point to an external domain. In this scenario, it makes sense for the subdomain to have a CNAME record pointing to the external domain.
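For example, a minimal ExternalName Service that should produce such a CNAME (the hostname and target are placeholders):
```yaml
apiVersion: v1
kind: Service
metadata:
  name: my-external-service          # placeholder
  annotations:
    external-dns.alpha.kubernetes.io/hostname: subdomain.example.org
spec:
  type: ExternalName
  externalName: backend.some-other-domain.com   # the CNAME target
```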
@ -27,7 +27,7 @@ spec:
spec:
containers:
- name: external-dns
image: registry.opensource.zalan.do/teapot/external-dns:latest
image: k8s.gcr.io/external-dns/external-dns:v0.7.3
args:
- --log-level=debug
- --source=service


@ -4,8 +4,6 @@ This tutorial describes how to setup ExternalDNS for usage within a GKE cluster.
## Set up your environment
*If you prefer to try-out ExternalDNS in one of the existing environments you can skip this step*
Setup your environment to work with Google Cloud Platform. Fill in your values as needed, e.g. target project.
```console
@ -14,6 +12,16 @@ $ gcloud config set compute/region "europe-west1"
$ gcloud config set compute/zone "europe-west1-d"
```
## GKE Node Scopes
*If you prefer to try-out ExternalDNS in one of the existing environments you can skip this step*
The following instructions use instance scopes to provide ExternalDNS with the
permissions it needs to manage DNS records. Note that since these permissions
are associated with the instance, all pods in the cluster will also have these
permissions. As such, this approach is not suitable for anything but testing
environments.
Create a GKE cluster.
```console
@ -52,58 +60,10 @@ $ gcloud dns record-sets transaction add ns-cloud-e{1..4}.googledomains.com. \
$ gcloud dns record-sets transaction execute --zone "gcp-zalan-do"
```
## Deploy ExternalDNS
### Deploy ExternalDNS
### Role-Based Access Control (RBAC)
Then apply the following manifests file to deploy ExternalDNS.
[RBAC]("https://cloud.google.com/kubernetes-engine/docs/how-to/role-based-access-control") is enabled by default on all Container clusters which are running Kubernetes version 1.6 or higher.
Because of the way Container Engine checks permissions when you create a Role or ClusterRole, you must first create a RoleBinding that grants you all of the permissions included in the role you want to create.
```console
kubectl create clusterrolebinding your-user-cluster-admin-binding --clusterrole=cluster-admin --user=your.google.cloud.email@example.org
```
Connect your `kubectl` client to the cluster you just created.
```console
gcloud container clusters get-credentials "external-dns"
```
Then apply one of the following manifests file to deploy ExternalDNS.
### Manifest (for clusters without RBAC enabled)
```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: external-dns
spec:
strategy:
type: Recreate
selector:
matchLabels:
app: external-dns
template:
metadata:
labels:
app: external-dns
spec:
containers:
- name: external-dns
image: registry.opensource.zalan.do/teapot/external-dns:latest
args:
- --source=service
- --source=ingress
- --domain-filter=external-dns-test.gcp.zalan.do # will make ExternalDNS see only the hosted zones matching provided domain, omit to process all available hosted zones
- --provider=google
# - --google-project=zalando-external-dns-test # Use this to specify a project different from the one external-dns is running inside
- --policy=upsert-only # would prevent ExternalDNS from deleting any records, omit to enable full synchronization
- --registry=txt
- --txt-owner-id=my-identifier
```
### Manifest (for clusters with RBAC enabled)
```yaml
apiVersion: v1
kind: ServiceAccount
@ -118,7 +78,7 @@ rules:
- apiGroups: [""]
resources: ["services","endpoints","pods"]
verbs: ["get","watch","list"]
- apiGroups: ["extensions"]
- apiGroups: ["extensions","networking.k8s.io"]
resources: ["ingresses"]
verbs: ["get","watch","list"]
- apiGroups: [""]
@ -156,7 +116,7 @@ spec:
serviceAccountName: external-dns
containers:
- name: external-dns
image: registry.opensource.zalan.do/teapot/external-dns:latest
image: k8s.gcr.io/external-dns/external-dns:v0.7.3
args:
- --source=service
- --source=ingress
@ -170,8 +130,7 @@ spec:
Use `--dry-run` if you want to be extra careful on the first run. Note, that you will not see any records created when you are running in dry-run mode. You can, however, inspect the logs and watch what would have been done.
## Verify ExternalDNS works
### Verify ExternalDNS works
Create the following sample application to test that ExternalDNS works.
@ -301,7 +260,7 @@ $ curl via-ingress.external-dns-test.gcp.zalan.do
</html>
```
## Clean up
### Clean up
Make sure to delete all Service and Ingress objects before terminating the cluster so all load balancers get cleaned up correctly.
@ -326,5 +285,294 @@ $ gcloud dns record-sets transaction remove ns-cloud-e{1..4}.googledomains.com.
$ gcloud dns record-sets transaction execute --zone "gcp-zalan-do"
```
### User Demo How-To Blogs and Examples
## GKE with Workload Identity
The following instructions use [GKE workload
identity](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity)
to provide ExternalDNS with the permissions it needs to manage DNS records.
Workload identity is the Google-recommended way to provide GKE workloads access
to GCP APIs.
Create a GKE cluster with workload identity enabled.
```console
$ gcloud container clusters create external-dns \
--workload-metadata-from-node=GKE_METADATA_SERVER \
--identity-namespace=zalando-external-dns-test.svc.id.goog
```
Create a GCP service account (GSA) for ExternalDNS and save its email address.
```console
$ sa_name="Kubernetes external-dns"
$ gcloud iam service-accounts create sa-edns --display-name="$sa_name"
$ sa_email=$(gcloud iam service-accounts list --format='value(email)' \
--filter="displayName:$sa_name")
```
Bind the ExternalDNS GSA to the DNS admin role.
```console
$ gcloud projects add-iam-policy-binding zalando-external-dns-test \
--member="serviceAccount:$sa_email" --role=roles/dns.admin
```
Link the ExternalDNS GSA to the Kubernetes service account (KSA) that
external-dns will run under, i.e., the external-dns KSA in the external-dns
namespace.
```console
$ gcloud iam service-accounts add-iam-policy-binding "$sa_email" \
--member="serviceAccount:zalando-external-dns-test.svc.id.goog[external-dns/external-dns]" \
--role=roles/iam.workloadIdentityUser
```
Create a DNS zone which will contain the managed DNS records.
```console
$ gcloud dns managed-zones create external-dns-test-gcp-zalan-do \
--dns-name=external-dns-test.gcp.zalan.do. \
--description="Automatically managed zone by ExternalDNS"
```
Make a note of the nameservers that were assigned to your new zone.
```console
$ gcloud dns record-sets list \
--zone=external-dns-test-gcp-zalan-do \
--name=external-dns-test.gcp.zalan.do. \
--type NS
NAME TYPE TTL DATA
external-dns-test.gcp.zalan.do. NS 21600 ns-cloud-e1.googledomains.com.,ns-cloud-e2.googledomains.com.,ns-cloud-e3.googledomains.com.,ns-cloud-e4.googledomains.com.
```
In this case it's `ns-cloud-{e1-e4}.googledomains.com.` but yours could differ
slightly, e.g. `{a1-a4}`, `{b1-b4}` etc.
Tell the parent zone where to find the DNS records for this zone by adding the
corresponding NS records there. Assuming the parent zone is "gcp-zalan-do" and
the domain is "gcp.zalan.do" and that it's also hosted at Google we would do the
following.
```console
$ gcloud dns record-sets transaction start --zone=gcp-zalan-do
$ gcloud dns record-sets transaction add ns-cloud-e{1..4}.googledomains.com. \
--name=external-dns-test.gcp.zalan.do. --ttl 300 --type NS --zone=gcp-zalan-do
$ gcloud dns record-sets transaction execute --zone=gcp-zalan-do
```
Connect your `kubectl` client to the cluster you just created and bind your GCP
user to the cluster admin role in Kubernetes.
```console
$ gcloud container clusters get-credentials external-dns
$ kubectl create clusterrolebinding cluster-admin-me \
--clusterrole=cluster-admin --user="$(gcloud config get-value account)"
```
### Deploy ExternalDNS
Apply the following manifest file to deploy external-dns.
```yaml
apiVersion: v1
kind: Namespace
metadata:
name: external-dns
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: external-dns
namespace: external-dns
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: external-dns
rules:
- apiGroups: [""]
resources: ["services", "endpoints", "pods"]
verbs: ["get", "watch", "list"]
- apiGroups: ["extensions", "networking.k8s.io"]
resources: ["ingresses"]
verbs: ["get", "watch", "list"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["list"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: external-dns-viewer
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: external-dns
subjects:
- kind: ServiceAccount
name: external-dns
namespace: external-dns
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: external-dns
namespace: external-dns
spec:
strategy:
type: Recreate
selector:
matchLabels:
app: external-dns
template:
metadata:
labels:
app: external-dns
spec:
containers:
- args:
- --source=ingress
- --source=service
- --domain-filter=external-dns-test.gcp.zalan.do
- --provider=google
- --google-project=zalando-external-dns-test
- --registry=txt
- --txt-owner-id=my-identifier
image: k8s.gcr.io/external-dns/external-dns:v0.7.3
name: external-dns
securityContext:
fsGroup: 65534
runAsUser: 65534
serviceAccountName: external-dns
```
Then add the proper workload identity annotation to the external-dns service
account.
```bash
$ kubectl annotate serviceaccount --namespace=external-dns external-dns \
"iam.gke.io/gcp-service-account=$sa_email"
```
### Deploy a sample application
Create the following sample application to test that ExternalDNS works.
```yaml
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: nginx
spec:
rules:
- host: via-ingress.external-dns-test.gcp.zalan.do
http:
paths:
- backend:
serviceName: nginx
servicePort: 80
---
apiVersion: v1
kind: Service
metadata:
annotations:
external-dns.alpha.kubernetes.io/hostname: nginx.external-dns-test.gcp.zalan.do.
name: nginx
spec:
ports:
- port: 80
targetPort: 80
selector:
app: nginx
type: LoadBalancer
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx
spec:
selector:
matchLabels:
app: nginx
template:
metadata:
labels:
app: nginx
spec:
containers:
- image: nginx
name: nginx
ports:
- containerPort: 80
```
After roughly two minutes check that corresponding DNS records for your
service and ingress were created.
```console
$ gcloud dns record-sets list \
--zone "external-dns-test-gcp-zalan-do" \
--name "via-ingress.external-dns-test.gcp.zalan.do." \
--type A
NAME TYPE TTL DATA
nginx.external-dns-test.gcp.zalan.do. A 300 104.155.60.49
nginx.external-dns-test.gcp.zalan.do. TXT 300 "heritage=external-dns,external-dns/owner=my-identifier"
via-ingress.external-dns-test.gcp.zalan.do. TXT 300 "heritage=external-dns,external-dns/owner=my-identifier"
via-ingress.external-dns-test.gcp.zalan.do. A 300 35.187.1.246
```
Let's check that we can resolve this DNS name as well.
```console
$ dig +short @ns-cloud-e1.googledomains.com. via-ingress.external-dns-test.gcp.zalan.do.
35.187.1.246
```
Try with `curl` as well.
```console
$ curl via-ingress.external-dns-test.gcp.zalan.do
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
...
</head>
<body>
...
</body>
</html>
```
### Clean up
Make sure to delete all service and ingress objects before terminating the
cluster so all load balancers and DNS entries get cleaned up correctly.
```console
$ kubectl delete ingress nginx
$ kubectl delete service nginx
```
Give ExternalDNS some time to clean up the DNS records for you. Then delete the
managed zone and cluster.
```console
$ gcloud dns managed-zones delete external-dns-test-gcp-zalan-do
$ gcloud container clusters delete external-dns
```
Also delete the NS records for your removed zone from the parent zone.
```console
$ gcloud dns record-sets transaction start --zone gcp-zalan-do
$ gcloud dns record-sets transaction remove ns-cloud-e{1..4}.googledomains.com. \
--name=external-dns-test.gcp.zalan.do. --ttl 300 --type NS --zone=gcp-zalan-do
$ gcloud dns record-sets transaction execute --zone=gcp-zalan-do
```
## User Demo How-To Blogs and Examples
* A full demo on GKE Kubernetes + CloudDNS + SA-Permissions [How-to Kubernetes with DNS management (ssl-manager pre-req)](https://medium.com/@jpantjsoha/how-to-kubernetes-with-dns-management-for-gitops-31239ea75d8d)
* Run external-dns on GKE with workload identity. See [Kubernetes, ingress-nginx, cert-manager & external-dns](https://blog.atomist.com/kubernetes-ingress-nginx-cert-manager-external-dns/)


@ -43,7 +43,7 @@ spec:
spec:
containers:
- name: external-dns
image: eu.gcr.io/k8s-artifacts-prod/external-dns/external-dns:v0.7.3
image: k8s.gcr.io/external-dns/external-dns:v0.7.3
args:
- --source=service # ingress is also possible
- --domain-filter=example.com # (optional) limit to only example.com domains; change to match the zone created above.
@ -68,7 +68,7 @@ rules:
- apiGroups: [""]
resources: ["services","endpoints","pods"]
verbs: ["get","watch","list"]
- apiGroups: ["extensions"]
- apiGroups: ["extensions","networking.k8s.io"]
resources: ["ingresses"]
verbs: ["get","watch","list"]
- apiGroups: [""]
@ -107,7 +107,7 @@ spec:
serviceAccountName: external-dns
containers:
- name: external-dns
image: registry.opensource.zalan.do/teapot/external-dns:latest
image: k8s.gcr.io/external-dns/external-dns:v0.7.3
args:
- --source=service # ingress is also possible
- --domain-filter=example.com # (optional) limit to only example.com domains; change to match the zone created above.


@ -2,7 +2,7 @@
This tutorial describes how to setup ExternalDNS for usage in conjunction with a Headless service.
## Usecases
## Use cases
The main use case that inspired this feature is the need for fixed, addressable hostnames for services such as Kafka when accessing them from outside the cluster. In this scenario, quite often, only the node IP addresses are actually routable, and for systems like Kafka more direct connections are preferable.
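A minimal sketch of such a headless Service (names, port and hostname are placeholders; the tutorial's manifests below provide the complete setup):
```yaml
apiVersion: v1
kind: Service
metadata:
  name: kafka-headless               # placeholder
  annotations:
    external-dns.alpha.kubernetes.io/hostname: kafka.example.org
spec:
  clusterIP: None                    # headless: DNS records point at the selected pods
  selector:
    app: kafka
  ports:
    - name: broker
      port: 9092
```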
## Setup
@ -31,7 +31,7 @@ spec:
spec:
containers:
- name: external-dns
image: registry.opensource.zalan.do/teapot/external-dns:latest
image: k8s.gcr.io/external-dns/external-dns:v0.7.3
args:
- --log-level=debug
- --source=service
@ -58,7 +58,7 @@ rules:
- apiGroups: [""]
resources: ["services","endpoints","pods"]
verbs: ["get","watch","list"]
- apiGroups: ["extensions"]
- apiGroups: ["extensions","networking.k8s.io"]
resources: ["ingresses"]
verbs: ["get","watch","list"]
- apiGroups: [""]
@ -96,7 +96,7 @@ spec:
serviceAccountName: external-dns
containers:
- name: external-dns
image: registry.opensource.zalan.do/teapot/external-dns:latest
image: k8s.gcr.io/external-dns/external-dns:v0.7.3
args:
- --log-level=debug
- --source=service


@ -69,7 +69,7 @@ spec:
spec:
containers:
- name: external-dns
image: registry.opensource.zalan.do/teapot/external-dns:latest
image: k8s.gcr.io/external-dns/external-dns:v0.7.3
args:
- --source=service
- --domain-filter=example.com # (optional) limit to only example.com domains.
@ -111,7 +111,7 @@ rules:
- apiGroups: [""]
resources: ["services","endpoints","pods"]
verbs: ["get","watch","list"]
- apiGroups: ["extensions"]
- apiGroups: ["extensions","networking.k8s.io"]
resources: ["ingresses"]
verbs: ["get","watch","list"]
- apiGroups: [""]
@ -149,7 +149,7 @@ spec:
serviceAccountName: external-dns
containers:
- name: external-dns
image: registry.opensource.zalan.do/teapot/external-dns:latest
image: k8s.gcr.io/external-dns/external-dns:v0.7.3
args:
- --source=service
- --domain-filter=example.com # (optional) limit to only example.com domains.


@ -28,7 +28,7 @@ spec:
spec:
containers:
- name: external-dns
image: registry.opensource.zalan.do/teapot/external-dns:latest
image: k8s.gcr.io/external-dns/external-dns:v0.7.3
args:
- --source=service
- --source=ingress
@ -57,7 +57,7 @@ rules:
- apiGroups: [""]
resources: ["services","endpoints","pods"]
verbs: ["get","watch","list"]
- apiGroups: ["extensions"]
- apiGroups: ["extensions","networking.k8s.io"]
resources: ["ingresses"]
verbs: ["get","watch","list"]
- apiGroups: [""]
@ -98,7 +98,7 @@ spec:
serviceAccountName: external-dns
containers:
- name: external-dns
image: registry.opensource.zalan.do/teapot/external-dns:latest
image: k8s.gcr.io/external-dns/external-dns:v0.7.3
args:
- --source=service
- --source=ingress


@ -43,6 +43,7 @@ metadata:
rules:
- apiGroups:
- extensions
- networking.k8s.io
resources:
- ingresses
verbs:
@ -51,6 +52,7 @@ rules:
- watch
- apiGroups:
- extensions
- networking.k8s.io
resources:
- ingresses/status
verbs:
@ -170,7 +172,7 @@ this Ingress object will only be fronting one backend Service, we might instead
create the following:
```yaml
apiVersion: extensions/v1beta1
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
annotations:
@ -203,7 +205,7 @@ and one AAAA record) for each hostname associated with the Ingress object.
Example:
```yaml
apiVersion: extensions/v1beta1
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
annotations:
@ -237,7 +239,7 @@ set to `nlb` then ExternalDNS will create an NLB instead of an ALB.
Example:
```yaml
apiVersion: extensions/v1beta1
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
annotations:


@ -41,7 +41,7 @@ spec:
spec:
containers:
- name: external-dns
image: registry.opensource.zalan.do/teapot/external-dns:latest
image: k8s.gcr.io/external-dns/external-dns:v0.7.3
args:
- --source=service # ingress is also possible
- --domain-filter=example.com # (optional) limit to only example.com domains; change to match the zone created above.
@ -67,7 +67,7 @@ rules:
- apiGroups: [""]
resources: ["services","endpoints","pods"]
verbs: ["get","watch","list"]
- apiGroups: ["extensions"]
- apiGroups: ["extensions","networking.k8s.io"]
resources: ["ingresses"]
verbs: ["get","watch","list"]
- apiGroups: [""]
@ -105,7 +105,7 @@ spec:
serviceAccountName: external-dns
containers:
- name: external-dns
image: registry.opensource.zalan.do/teapot/external-dns:latest
image: k8s.gcr.io/external-dns/external-dns:v0.7.3
args:
- --source=service # ingress is also possible
- --domain-filter=example.com # (optional) limit to only example.com domains; change to match the zone created above.


@ -2,6 +2,8 @@
This tutorial describes how to setup ExternalDNS for usage within a GKE cluster that doesn't make use of Google's [default ingress controller](https://github.com/kubernetes/ingress-gce) but rather uses [nginx-ingress-controller](https://github.com/kubernetes/ingress-nginx) for that task.
## Set up your environment
Setup your environment to work with Google Cloud Platform. Fill in your values as needed, e.g. target project.
```console
@ -10,6 +12,14 @@ $ gcloud config set compute/region "europe-west1"
$ gcloud config set compute/zone "europe-west1-d"
```
## GKE Node Scopes
The following instructions use instance scopes to provide ExternalDNS with the
permissions it needs to manage DNS records. Note that since these permissions
are associated with the instance, all pods in the cluster will also have these
permissions. As such, this approach is not suitable for anything but testing
environments.
Create a GKE cluster without using the default ingress controller.
```console
@ -48,19 +58,20 @@ $ gcloud dns record-sets transaction add ns-cloud-e{1..4}.googledomains.com. \
$ gcloud dns record-sets transaction execute --zone "gcp-zalan-do"
```
If you decide not to create a new zone but reuse an existing one, make sure it's currently **unused** and **empty**. This version of ExternalDNS will remove all records it doesn't recognize from the zone.
Connect your `kubectl` client to the cluster you just created.
Connect your `kubectl` client to the cluster you just created and bind your GCP
user to the cluster admin role in Kubernetes.
```console
gcloud container clusters get-credentials "external-dns"
$ gcloud container clusters get-credentials "external-dns"
$ kubectl create clusterrolebinding cluster-admin-me \
--clusterrole=cluster-admin --user="$(gcloud config get-value account)"
```
## Deploy the nginx ingress controller
### Deploy the nginx ingress controller
First, you need to deploy the nginx-based ingress controller. It can be deployed in at least two modes: Leveraging a Layer 4 load balancer in front of the nginx proxies or directly targeting pods with hostPorts on your worker nodes. ExternalDNS doesn't really care and supports both modes.
### Default Backend
#### Default Backend
The nginx controller uses a default backend that it serves when no Ingress rule matches. This is a separate Service that can be picked by you. We'll use the default backend that's used by other ingress controllers for that matter. Apply the following manifests to your cluster to deploy the default backend.
@ -96,7 +107,7 @@ spec:
image: gcr.io/google_containers/defaultbackend:1.3
```
### Without a separate TCP load balancer
#### Without a separate TCP load balancer
By default, the controller will update your Ingress objects with the public IPs of the nodes running your nginx controller instances. You should run multiple instances in case of pod or node failure. The controller will do leader election and will put multiple IPs as targets in your Ingress objects in that case. It could also make sense to run it as a DaemonSet. However, we'll just run a single replica. You have to open the respective ports on all of your worker nodes to allow nginx to receive traffic.
@ -145,7 +156,7 @@ spec:
hostPort: 443
```
### With a separate TCP load balancer
#### With a separate TCP load balancer
However, you can also have the ingress controller proxied by a Kubernetes Service. This will instruct the controller to populate this Service's external IP as the external IP of the Ingress. This exposes the nginx proxies via a Layer 4 load balancer (`type=LoadBalancer`) which is more reliable than the other method. With that approach, you can run as many nginx proxy instances on your cluster as you like or have them autoscaled. This is the preferred way of running the nginx controller.
@ -206,7 +217,7 @@ spec:
- containerPort: 443
```
## Deploy ExternalDNS
### Deploy ExternalDNS
Apply the following manifest file to deploy ExternalDNS.
@ -224,7 +235,7 @@ rules:
- apiGroups: [""]
resources: ["services","endpoints","pods"]
verbs: ["get","watch","list"]
- apiGroups: ["extensions"]
- apiGroups: ["extensions","networking.k8s.io"]
resources: ["ingresses"]
verbs: ["get","watch","list"]
- apiGroups: [""]
@ -262,7 +273,7 @@ spec:
serviceAccountName: external-dns
containers:
- name: external-dns
image: registry.opensource.zalan.do/teapot/external-dns:latest
image: k8s.gcr.io/external-dns/external-dns:v0.7.3
args:
- --source=ingress
- --domain-filter=external-dns-test.gcp.zalan.do
@ -274,7 +285,7 @@ spec:
Use `--dry-run` if you want to be extra careful on the first run. Note, that you will not see any records created when you are running in dry-run mode. You can, however, inspect the logs and watch what would have been done.
## Deploy a sample application
### Deploy a sample application
Create the following sample application to test that ExternalDNS works.
@ -363,7 +374,7 @@ $ curl via-ingress.external-dns-test.gcp.zalan.do
</html>
```
## Clean up
### Clean up
Make sure to delete all Service and Ingress objects before terminating the cluster so all load balancers and DNS entries get cleaned up correctly.
@ -387,3 +398,302 @@ $ gcloud dns record-sets transaction remove ns-cloud-e{1..4}.googledomains.com.
--name "external-dns-test.gcp.zalan.do." --ttl 300 --type NS --zone "gcp-zalan-do"
$ gcloud dns record-sets transaction execute --zone "gcp-zalan-do"
```
## GKE with Workload Identity
The following instructions use [GKE workload
identity](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity)
to provide ExternalDNS with the permissions it needs to manage DNS records.
Workload identity is the Google-recommended way to provide GKE workloads access
to GCP APIs.
Create a GKE cluster with workload identity enabled and without the
HttpLoadBalancing add-on.
```console
$ gcloud container clusters create external-dns \
--workload-metadata-from-node=GKE_METADATA_SERVER \
--identity-namespace=zalando-external-dns-test.svc.id.goog \
--addons=HorizontalPodAutoscaling
```
Create a GCP service account (GSA) for ExternalDNS and save its email address.
```console
$ sa_name="Kubernetes external-dns"
$ gcloud iam service-accounts create sa-edns --display-name="$sa_name"
$ sa_email=$(gcloud iam service-accounts list --format='value(email)' \
--filter="displayName:$sa_name")
```
Bind the ExternalDNS GSA to the DNS admin role.
```console
$ gcloud projects add-iam-policy-binding zalando-external-dns-test \
--member="serviceAccount:$sa_email" --role=roles/dns.admin
```
Link the ExternalDNS GSA to the Kubernetes service account (KSA) that
external-dns will run under, i.e., the external-dns KSA in the external-dns
namespace.
```console
$ gcloud iam service-accounts add-iam-policy-binding "$sa_email" \
--member="serviceAccount:zalando-external-dns-test.svc.id.goog[external-dns/external-dns]" \
--role=roles/iam.workloadIdentityUser
```
Create a DNS zone which will contain the managed DNS records.
```console
$ gcloud dns managed-zones create external-dns-test-gcp-zalan-do \
--dns-name=external-dns-test.gcp.zalan.do. \
--description="Automatically managed zone by ExternalDNS"
```
Make a note of the nameservers that were assigned to your new zone.
```console
$ gcloud dns record-sets list \
--zone=external-dns-test-gcp-zalan-do \
--name=external-dns-test.gcp.zalan.do. \
--type NS
NAME TYPE TTL DATA
external-dns-test.gcp.zalan.do. NS 21600 ns-cloud-e1.googledomains.com.,ns-cloud-e2.googledomains.com.,ns-cloud-e3.googledomains.com.,ns-cloud-e4.googledomains.com.
```
In this case it's `ns-cloud-{e1-e4}.googledomains.com.`, but yours could differ
slightly, e.g. `{a1-a4}`, `{b1-b4}` etc.
Tell the parent zone where to find the DNS records for this zone by adding the
corresponding NS records there. Assuming the parent zone is "gcp-zalan-do", the
domain is "gcp.zalan.do", and that it's also hosted at Google, we would do the
following.
```console
$ gcloud dns record-sets transaction start --zone=gcp-zalan-do
$ gcloud dns record-sets transaction add ns-cloud-e{1..4}.googledomains.com. \
--name=external-dns-test.gcp.zalan.do. --ttl 300 --type NS --zone=gcp-zalan-do
$ gcloud dns record-sets transaction execute --zone=gcp-zalan-do
```
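To confirm the delegation is in place, you can query for the NS records of the subzone once the change has propagated; this is only a sanity check, and the nameserver names below follow the example above and may differ in your setup:
```console
$ dig +short NS external-dns-test.gcp.zalan.do.
ns-cloud-e1.googledomains.com.
ns-cloud-e2.googledomains.com.
ns-cloud-e3.googledomains.com.
ns-cloud-e4.googledomains.com.
```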
Connect your `kubectl` client to the cluster you just created and bind your GCP
user to the cluster admin role in Kubernetes.
```console
$ gcloud container clusters get-credentials external-dns
$ kubectl create clusterrolebinding cluster-admin-me \
--clusterrole=cluster-admin --user="$(gcloud config get-value account)"
```
### Deploy ingress-nginx
Follow the [ingress-nginx GKE installation
instructions](https://kubernetes.github.io/ingress-nginx/deploy/#gce-gke) to
deploy it to the cluster.
```console
$ kubectl apply -f \
https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v0.35.0/deploy/static/provider/cloud/deploy.yaml
```
### Deploy ExternalDNS
Apply the following manifest file to deploy external-dns.
```yaml
apiVersion: v1
kind: Namespace
metadata:
name: external-dns
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: external-dns
namespace: external-dns
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: external-dns
rules:
- apiGroups: [""]
resources: ["services", "endpoints", "pods"]
verbs: ["get", "watch", "list"]
- apiGroups: ["extensions", "networking.k8s.io"]
resources: ["ingresses"]
verbs: ["get", "watch", "list"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["list"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: external-dns-viewer
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: external-dns
subjects:
- kind: ServiceAccount
name: external-dns
namespace: external-dns
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: external-dns
namespace: external-dns
spec:
strategy:
type: Recreate
selector:
matchLabels:
app: external-dns
template:
metadata:
labels:
app: external-dns
spec:
containers:
- args:
- --source=ingress
- --domain-filter=external-dns-test.gcp.zalan.do
- --provider=google
- --google-project=zalando-external-dns-test
- --registry=txt
- --txt-owner-id=my-identifier
image: k8s.gcr.io/external-dns/external-dns:v0.7.3
name: external-dns
securityContext:
fsGroup: 65534
runAsUser: 65534
serviceAccountName: external-dns
```
Then add the proper workload identity annotation to the external-dns Kubernetes service
account.
```bash
$ kubectl annotate serviceaccount --namespace=external-dns external-dns \
"iam.gke.io/gcp-service-account=$sa_email"
```
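You can double-check that the annotation landed on the right service account (a quick sanity check; adjust the names if you used different ones). The `iam.gke.io/gcp-service-account` annotation should point at the GSA email saved earlier:
```console
$ kubectl get serviceaccount external-dns --namespace=external-dns -o yaml
```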
### Deploy a sample application
Create the following sample application to test that ExternalDNS works.
```yaml
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: nginx
annotations:
kubernetes.io/ingress.class: nginx
spec:
rules:
- host: via-ingress.external-dns-test.gcp.zalan.do
http:
paths:
- backend:
serviceName: nginx
servicePort: 80
---
apiVersion: v1
kind: Service
metadata:
name: nginx
spec:
ports:
- port: 80
targetPort: 80
selector:
app: nginx
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx
spec:
selector:
matchLabels:
app: nginx
template:
metadata:
labels:
app: nginx
spec:
containers:
- image: nginx
name: nginx
ports:
- containerPort: 80
```
After roughly two minutes check that a corresponding DNS record for your ingress
was created.
```console
$ gcloud dns record-sets list \
--zone "external-dns-test-gcp-zalan-do" \
--name "via-ingress.external-dns-test.gcp.zalan.do." \
--type A
NAME TYPE TTL DATA
via-ingress.external-dns-test.gcp.zalan.do. A 300 35.187.1.246
```
Let's check that we can resolve this DNS name as well.
```console
$ dig +short @ns-cloud-e1.googledomains.com. via-ingress.external-dns-test.gcp.zalan.do.
35.187.1.246
```
Try with `curl` as well.
```console
$ curl via-ingress.external-dns-test.gcp.zalan.do
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
...
</head>
<body>
...
</body>
</html>
```
### Clean up
Make sure to delete all service and ingress objects before terminating the
cluster so all load balancers and DNS entries get cleaned up correctly.
```console
$ kubectl delete service --namespace=ingress-nginx ingress-nginx-controller
$ kubectl delete ingress nginx
```
Give ExternalDNS some time to clean up the DNS records for you. Then delete the
managed zone and cluster.
```console
$ gcloud dns managed-zones delete external-dns-test-gcp-zalan-do
$ gcloud container clusters delete external-dns
```
Also delete the NS records for your removed zone from the parent zone.
```console
$ gcloud dns record-sets transaction start --zone gcp-zalan-do
$ gcloud dns record-sets transaction remove ns-cloud-e{1..4}.googledomains.com. \
--name=external-dns-test.gcp.zalan.do. --ttl 300 --type NS --zone=gcp-zalan-do
$ gcloud dns record-sets transaction execute --zone=gcp-zalan-do
```
## User Demo How-To Blogs and Examples
* Run external-dns on GKE with workload identity. See [Kubernetes, ingress-nginx, cert-manager & external-dns](https://blog.atomist.com/kubernetes-ingress-nginx-cert-manager-external-dns/)

View File

@ -61,7 +61,7 @@ spec:
spec:
containers:
- name: external-dns
image: registry.opensource.zalan.do/teapot/external-dns:latest
image: k8s.gcr.io/external-dns/external-dns:v0.7.3
args:
- --source=service # ingress is also possible
- --domain-filter=example.com # (optional) limit to only example.com domains; change to match the zone created above.
@ -87,7 +87,7 @@ rules:
- apiGroups: [""]
resources: ["services","endpoints","pods"]
verbs: ["get","watch","list"]
- apiGroups: ["extensions"]
- apiGroups: ["extensions","networking.k8s.io"]
resources: ["ingresses"]
verbs: ["get","watch","list"]
- apiGroups: [""]
@ -125,7 +125,7 @@ spec:
serviceAccountName: external-dns
containers:
- name: external-dns
image: registry.opensource.zalan.do/teapot/external-dns:latest
image: k8s.gcr.io/external-dns/external-dns:v0.7.3
args:
- --source=service # ingress is also possible
- --domain-filter=example.com # (optional) limit to only example.com domains; change to match the zone created above.

View File

@ -25,7 +25,7 @@ spec:
spec:
containers:
- name: external-dns
image: registry.opensource.zalan.do/teapot/external-dns:latest
image: k8s.gcr.io/external-dns/external-dns:v0.7.3
args:
- --source=openshift-route
- --domain-filter=external-dns-test.my-org.com # will make ExternalDNS see only the hosted zones matching provided domain, omit to process all available hosted zones
@ -51,7 +51,7 @@ rules:
- apiGroups: [""]
resources: ["services","endpoints","pods"]
verbs: ["get","watch","list"]
- apiGroups: ["extensions"]
- apiGroups: ["extensions","networking.k8s.io"]
resources: ["ingresses"]
verbs: ["get","watch","list"]
- apiGroups: [""]
@ -92,7 +92,7 @@ spec:
serviceAccountName: external-dns
containers:
- name: external-dns
image: registry.opensource.zalan.do/teapot/external-dns:latest
image: k8s.gcr.io/external-dns/external-dns:v0.7.3
args:
- --source=openshift-route
- --domain-filter=external-dns-test.my-org.com # will make ExternalDNS see only the hosted zones matching provided domain, omit to process all available hosted zones

View File

@ -26,6 +26,8 @@ auth:
-----BEGIN RSA PRIVATE KEY-----
-----END RSA PRIVATE KEY-----
fingerprint: af:81:71:8e...
# Omit if there is not a password for the key
passphrase: Tx1jRk...
compartment: ocid1.compartment.oc1...
```
@ -53,7 +55,7 @@ rules:
- apiGroups: [""]
resources: ["services","endpoints","pods"]
verbs: ["get","watch","list"]
- apiGroups: ["extensions"]
- apiGroups: ["extensions","networking.k8s.io"]
resources: ["ingresses"]
verbs: ["get","watch","list"]
- apiGroups: [""]
@ -91,12 +93,12 @@ spec:
serviceAccountName: external-dns
containers:
- name: external-dns
image: registry.opensource.zalan.do/teapot/external-dns:latest
image: k8s.gcr.io/external-dns/external-dns:v0.7.3
args:
- --source=service
- --source=ingress
- --provider=oci
- --policy=upsert-only # prevent ExternalDNSfrom deleting any records, omit to enable full synchronization
- --policy=upsert-only # prevent ExternalDNS from deleting any records, omit to enable full synchronization
- --txt-owner-id=my-identifier
volumeMounts:
- name: config

View File

@ -86,7 +86,7 @@ spec:
spec:
containers:
- name: external-dns
image: registry.opensource.zalan.do/teapot/external-dns:latest
image: k8s.gcr.io/external-dns/external-dns:v0.7.3
args:
- --source=service # ingress is also possible
- --domain-filter=example.com # (optional) limit to only example.com domains; change to match the zone created above.
@ -119,7 +119,7 @@ rules:
- apiGroups: [""]
resources: ["pods"]
verbs: ["get","watch","list"]
- apiGroups: ["extensions"]
- apiGroups: ["extensions","networking.k8s.io"]
resources: ["ingresses"]
verbs: ["get","watch","list"]
- apiGroups: [""]
@ -157,7 +157,7 @@ spec:
serviceAccountName: external-dns
containers:
- name: external-dns
image: registry.opensource.zalan.do/teapot/external-dns:latest
image: k8s.gcr.io/external-dns/external-dns:v0.7.3
args:
- --source=service # ingress is also possible
- --domain-filter=example.com # (optional) limit to only example.com domains; change to match the zone created above.

View File

@ -42,7 +42,7 @@ spec:
# serviceAccountName: external-dns
containers:
- name: external-dns
image: registry.opensource.zalan.do/teapot/external-dns:latest
image: k8s.gcr.io/external-dns/external-dns:v0.7.3
args:
- --source=service # or ingress or both
- --provider=pdns
@ -78,7 +78,7 @@ rules:
- apiGroups: [""]
resources: ["services","endpoints","pods"]
verbs: ["get","watch","list"]
- apiGroups: ["extensions"]
- apiGroups: ["extensions","networking.k8s.io"]
resources: ["ingresses"]
verbs: ["get","watch","list"]
- apiGroups: [""]

View File

@ -243,7 +243,7 @@ spec:
- --txt-owner-id=external-dns
- --annotation-filter=kubernetes.io/ingress.class=external-ingress
- --aws-zone-type=public
image: registry.opensource.zalan.do/teapot/external-dns:latest
image: k8s.gcr.io/external-dns/external-dns:v0.7.3
name: external-dns-public
```
@ -281,7 +281,7 @@ spec:
- --txt-owner-id=dev.k8s.nexus
- --annotation-filter=kubernetes.io/ingress.class=internal-ingress
- --aws-zone-type=private
image: registry.opensource.zalan.do/teapot/external-dns:latest
image: k8s.gcr.io/external-dns/external-dns:v0.7.3
name: external-dns-private
```

View File

@ -53,7 +53,7 @@ spec:
spec:
containers:
- name: external-dns
image: registry.opensource.zalan.do/teapot/external-dns:latest
image: k8s.gcr.io/external-dns/external-dns:v0.7.3
args:
- --source=service # ingress is also possible
- --domain-filter=example.com # (optional) limit to only example.com domains; change to match the zone created above.
@ -82,7 +82,7 @@ rules:
- apiGroups: [""]
resources: ["services","endpoints","pods"]
verbs: ["get","watch","list"]
- apiGroups: ["extensions"]
- apiGroups: ["extensions","networking.k8s.io"]
resources: ["ingresses"]
verbs: ["get","watch","list"]
- apiGroups: [""]
@ -120,7 +120,7 @@ spec:
serviceAccountName: external-dns
containers:
- name: external-dns
image: registry.opensource.zalan.do/teapot/external-dns:latest
image: k8s.gcr.io/external-dns/external-dns:v0.7.3
args:
- --source=service # ingress is also possible
- --domain-filter=example.com # (optional) limit to only example.com domains; change to match the zone created above.

View File

@ -54,7 +54,7 @@ spec:
serviceAccountName: external-dns
containers:
- name: external-dns
image: registry.opensource.zalan.do/teapot/external-dns:latest
image: k8s.gcr.io/external-dns/external-dns:v0.7.3
args:
- --source=ingress
- --provider=rdns
@ -78,7 +78,7 @@ rules:
- apiGroups: [""]
resources: ["services","endpoints","pods"]
verbs: ["get","watch","list"]
- apiGroups: ["extensions"]
- apiGroups: ["extensions","networking.k8s.io"]
resources: ["ingresses"]
verbs: ["get","watch","list"]
- apiGroups: [""]
@ -123,7 +123,7 @@ spec:
serviceAccountName: external-dns
containers:
- name: external-dns
image: registry.opensource.zalan.do/teapot/external-dns:latest
image: k8s.gcr.io/external-dns/external-dns:v0.7.3
args:
- --source=ingress
- --provider=rdns

View File

@ -108,9 +108,29 @@ spec:
serviceName: my-service
servicePort: 8000
```
There are other annotation that can affect the generation of DNS records like
external-dns.alpha.kubernetes.io/ttl. These are beyond the scope of this
tutorial and are covered elsewhere in the docs.
### Custom TTL
The default DNS record TTL (Time-To-Live) is 0 seconds. You can customize this value by setting the annotation `external-dns.alpha.kubernetes.io/ttl`. e.g., modify the service manifest YAML file above:
```yaml
apiVersion: v1
kind: Service
metadata:
name: nginx
annotations:
external-dns.alpha.kubernetes.io/hostname: nginx.external-dns-test.my-org.com
external-dns.alpha.kubernetes.io/ttl: "60"
spec:
...
```
This will set the DNS record's TTL to 60 seconds.
A default TTL for all records can be set using the `--rfc2136-min-ttl` flag with a time in seconds, minutes or hours, such as `--rfc2136-min-ttl=60s`.
There are other annotations that can affect the generation of DNS records, but these are beyond the scope of this
tutorial and are covered in the main documentation.
### Test with external-dns installed on local machine (optional)
You may install external-dns and test on a local machine by running:
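The exact invocation depends on your DNS server. A minimal sketch, assuming a BIND server reachable at 192.168.0.1 that serves the zone `k8s.example.org` and accepts dynamic updates signed with a TSIG key named `externaldns-key`, could look like the following; all host names, zone names and secrets are placeholders for your environment:
```console
$ external-dns --provider=rfc2136 \
    --rfc2136-host=192.168.0.1 --rfc2136-port=53 \
    --rfc2136-zone=k8s.example.org \
    --rfc2136-tsig-keyname=externaldns-key \
    --rfc2136-tsig-secret=<base64-encoded-tsig-secret> \
    --rfc2136-tsig-secret-alg=hmac-sha256 \
    --rfc2136-tsig-axfr \
    --source=ingress \
    --domain-filter=k8s.example.org \
    --registry=txt --txt-owner-id=k8s \
    --dry-run --once
```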
@ -152,6 +172,7 @@ rules:
- list
- apiGroups:
- extensions
- networking.k8s.io
resources:
- ingresses
verbs:
@ -196,7 +217,7 @@ spec:
serviceAccountName: external-dns
containers:
- name: external-dns
image: eu.gcr.io/k8s-artifacts-prod/external-dns/external-dns:v0.6.0
image: k8s.gcr.io/external-dns/external-dns:v0.7.3
args:
- --txt-owner-id=k8s
- --provider=rfc2136
@ -236,7 +257,7 @@ spec:
spec:
containers:
- name: external-dns
image: eu.gcr.io/k8s-artifacts-prod/external-dns/external-dns:v0.6.0
image: k8s.gcr.io/external-dns/external-dns:v0.7.3
args:
- --txt-owner-id=k8s
- --provider=rfc2136

209
docs/tutorials/scaleway.md Normal file
View File

@ -0,0 +1,209 @@
# Setting up ExternalDNS for Services on Scaleway
This tutorial describes how to set up ExternalDNS for use within a Kubernetes cluster with Scaleway DNS.
Make sure to use ExternalDNS version **>=0.7.3** for this tutorial.
**Warning**: Scaleway DNS is currently in Public Beta and may not be suited for production usage.
## Importing a Domain into Scaleway DNS
In order to use your domain, you need to import it into Scaleway DNS. If you have not done so already, you can follow [this documentation](https://www.scaleway.com/en/docs/scaleway-dns/).
Once the domain is imported, you can either use the root zone or create a subzone to use.
In this example we will use `example.com`.
## Creating Scaleway Credentials
To use ExternalDNS with Scaleway DNS, you need to create an API token (composed of the Access Key and the Secret Key).
You can either use existing ones or create a new token, as explained in [How to generate an API token](https://www.scaleway.com/en/docs/generate-an-api-token/), or create one directly on the [credentials page](https://console.scaleway.com/account/organization/credentials).
Note that you will also need the Organization ID, which can be retrieved on the same page.
Three environment variables are needed to run ExternalDNS with Scaleway DNS:
- `SCW_ACCESS_KEY` which is the Access Key.
- `SCW_SECRET_KEY` which is the Secret Key.
- `SCW_DEFAULT_ORGANIZATION_ID` which is your Organization ID.
## Deploy ExternalDNS
Connect your `kubectl` client to the cluster you want to test ExternalDNS with.
Then apply one of the following manifest files to deploy ExternalDNS.
The following examples are suited for development. For production usage, prefer Kubernetes Secrets over plain environment variables, and use a [tagged release](https://github.com/kubernetes-sigs/external-dns/releases).
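For example, the credentials could be stored in a Secret and referenced from the Deployment with `valueFrom.secretKeyRef` instead of the literal `value` fields shown below; this is only a sketch, and the secret name `scaleway-credentials` is an arbitrary choice:
```console
$ kubectl create secret generic scaleway-credentials \
    --from-literal=SCW_ACCESS_KEY=<your access key> \
    --from-literal=SCW_SECRET_KEY=<your secret key> \
    --from-literal=SCW_DEFAULT_ORGANIZATION_ID=<your organization ID>
```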
### Manifest (for clusters without RBAC enabled)
```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: external-dns
spec:
replicas: 1
selector:
matchLabels:
app: external-dns
strategy:
type: Recreate
template:
metadata:
labels:
app: external-dns
spec:
containers:
- name: external-dns
image: k8s.gcr.io/external-dns/external-dns:v0.7.3
args:
- --source=service # ingress is also possible
- --domain-filter=example.com # (optional) limit to only example.com domains; change to match the zone created above.
- --provider=scaleway
env:
- name: SCW_ACCESS_KEY
value: "<your access key>"
- name: SCW_SECRET_KEY
value: "<your secret key>"
- name: SCW_DEFAULT_ORGANIZATION_ID
value: "<your organization ID>"
```
### Manifest (for clusters with RBAC enabled)
```yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: external-dns
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: external-dns
rules:
- apiGroups: [""]
resources: ["services","endpoints","pods"]
verbs: ["get","watch","list"]
- apiGroups: ["extensions"]
resources: ["ingresses"]
verbs: ["get","watch","list"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["list","watch"]
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: external-dns-viewer
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: external-dns
subjects:
- kind: ServiceAccount
name: external-dns
namespace: default
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: external-dns
spec:
replicas: 1
selector:
matchLabels:
app: external-dns
strategy:
type: Recreate
template:
metadata:
labels:
app: external-dns
spec:
serviceAccountName: external-dns
containers:
- name: external-dns
image: k8s.gcr.io/external-dns/external-dns:v0.7.3
args:
- --source=service # ingress is also possible
- --domain-filter=example.com # (optional) limit to only example.com domains; change to match the zone created above.
- --provider=scaleway
env:
- name: SCW_ACCESS_KEY
value: "<your access key>"
- name: SCW_SECRET_KEY
value: "<your secret key>"
- name: SCW_DEFAULT_ORGANIZATION_ID
value: "<your organization ID>"
```
## Deploying an Nginx Service
Create a service file called 'nginx.yaml' with the following contents:
```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx
spec:
replicas: 1
selector:
matchLabels:
app: nginx
template:
metadata:
labels:
app: nginx
spec:
containers:
- image: nginx
name: nginx
ports:
- containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
name: nginx
annotations:
external-dns.alpha.kubernetes.io/hostname: my-app.example.com
spec:
selector:
app: nginx
type: LoadBalancer
ports:
- protocol: TCP
port: 80
targetPort: 80
```
Note the annotation on the service; use a hostname that belongs to the Scaleway DNS zone created above.
ExternalDNS uses this annotation to determine what services should be registered with DNS. Removing the annotation will cause ExternalDNS to remove the corresponding DNS records.
Create the deployment and service:
```console
$ kubectl create -f nginx.yaml
```
Depending on where you run your service, it can take a little while for your cloud provider to create an external IP for the service.
Once the service has an external IP assigned, ExternalDNS will notice the new service IP address and synchronize the Scaleway DNS records.
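You can watch for the external IP to be assigned (assuming the service name `nginx` from the manifest above):
```console
$ kubectl get service nginx --watch
```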
## Verifying Scaleway DNS records
Check your [Scaleway DNS UI](https://console.scaleway.com/domains/external) to view the records for your Scaleway DNS zone.
Click on the zone created above (or the appropriate zone if a different domain was used).
This should show the external IP address of the service as the A record for your domain.
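You can also query the record directly once it has propagated (the hostname comes from the annotation in the example above):
```console
$ dig +short my-app.example.com A
```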
## Cleanup
Now that we have verified that ExternalDNS will automatically manage Scaleway DNS records, we can delete the tutorial's example resources:
```console
$ kubectl delete -f nginx.yaml
$ kubectl delete -f externaldns.yaml
```

View File

@ -20,7 +20,7 @@ spec:
spec:
containers:
- name: external-dns
image: eu.gcr.io/k8s-artifacts-prod/external-dns/external-dns:v0.6.0 # minimum version is v0.5.6
image: k8s.gcr.io/external-dns/external-dns:v0.7.3
args:
- ... # your arguments here
securityContext:

View File

@ -36,7 +36,7 @@ spec:
spec:
containers:
- name: external-dns
image: registry.opensource.zalan.do/teapot/external-dns:latest
image: k8s.gcr.io/external-dns/external-dns:v0.7.3
args:
- --source=service # ingress is also possible
- --domain-filter=example.com # (optional) limit to only example.com domains
@ -69,7 +69,7 @@ rules:
- apiGroups: [""]
resources: ["services","endpoints","pods"]
verbs: ["get","watch","list"]
- apiGroups: ["extensions"]
- apiGroups: ["extensions","networking.k8s.io"]
resources: ["ingresses"]
verbs: ["get","watch","list"]
- apiGroups: [""]
@ -107,7 +107,7 @@ spec:
serviceAccountName: external-dns
containers:
- name: external-dns
image: registry.opensource.zalan.do/teapot/external-dns:latest
image: k8s.gcr.io/external-dns/external-dns:v0.7.3
args:
- --source=service # ingress is also possible
- --domain-filter=example.com # (optional) limit to only example.com domains

620
docs/tutorials/ultradns.md Normal file
View File

@ -0,0 +1,620 @@
# Setting up ExternalDNS for Services on UltraDNS
This tutorial describes how to set up ExternalDNS for use within a Kubernetes cluster with UltraDNS.
For this tutorial, please make sure that you are using ExternalDNS version **> 0.7.2**.
## Managing DNS with UltraDNS
If you would like to read up on the UltraDNS service, you can find additional details here: [Introduction to UltraDNS](https://docs.ultradns.neustar)
Before proceeding, please create a new DNS zone in which you will create the records for this tutorial. For the examples in this tutorial, we will be using `example.com` as our zone.
## Setting Up UltraDNS Credentials
The following environment variables are needed to run ExternalDNS with UltraDNS:
`ULTRADNS_USERNAME`, `ULTRADNS_PASSWORD`, and `ULTRADNS_BASEURL`.
`ULTRADNS_ACCOUNTNAME` is optional.
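The manifests below pass these variables to the container and, as noted in their comments, expect `ULTRADNS_PASSWORD` to be Base64 encoded. One way to produce that value (a sketch; replace the placeholder with your real password):
```console
$ echo -n '<your password>' | base64
```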
## Deploying ExternalDNS
Connect your `kubectl` client to the cluster you want to test ExternalDNS with.
Then, apply one of the following manifest files to deploy ExternalDNS.
- Note: We assume the zone is already present within UltraDNS.
- Note: When creating CNAMEs as target endpoints, the `--txt-prefix` option is mandatory.
### Manifest (for clusters without RBAC enabled)
```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: external-dns
spec:
strategy:
type: Recreate
selector:
matchLabels:
app: external-dns
template:
metadata:
labels:
app: external-dns
spec:
containers:
- name: external-dns
image: k8s.gcr.io/external-dns/external-dns:v0.7.3
args:
- --source=service
- --source=ingress # ingress is also possible
- --domain-filter=example.com # (Recommended) We recommend using this filter, as it minimizes the time to propagate changes because there are fewer zones to look into.
- --provider=ultradns
- --txt-prefix=txt-
env:
- name: ULTRADNS_USERNAME
value: ""
- name: ULTRADNS_PASSWORD # The password must be Base64 encoded.
value: ""
- name: ULTRADNS_BASEURL
value: "https://api.ultradns.com/"
- name: ULTRADNS_ACCOUNTNAME
value: ""
```
### Manifest (for clusters with RBAC enabled)
```yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: external-dns
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: external-dns
rules:
- apiGroups: [""]
resources: ["services","endpoints","pods"]
verbs: ["get","watch","list"]
- apiGroups: ["extensions"]
resources: ["ingresses"]
verbs: ["get","watch","list"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["list","watch"]
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: external-dns-viewer
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: external-dns
subjects:
- kind: ServiceAccount
name: external-dns
namespace: default
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: external-dns
spec:
strategy:
type: Recreate
selector:
matchLabels:
app: external-dns
template:
metadata:
labels:
app: external-dns
spec:
serviceAccountName: external-dns
containers:
- name: external-dns
image: k8s.gcr.io/external-dns/external-dns:v0.7.3
args:
- --source=service
- --source=ingress
- --domain-filter=example.com # (Recommended) We recommend using this filter, as it minimizes the time to propagate changes because there are fewer zones to look into.
- --provider=ultradns
- --txt-prefix=txt-
env:
- name: ULTRADNS_USERNAME
value: ""
- name: ULTRADNS_PASSWORD # The password must be Base64 encoded.
value: ""
- name: ULTRADNS_BASEURL
value: "https://api.ultradns.com/"
- name: ULTRADNS_ACCOUNTNAME
value: ""
```
## Deploying an Nginx Service
Create a service file called 'nginx.yaml' with the following contents:
```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx
spec:
selector:
matchLabels:
app: nginx
template:
metadata:
labels:
app: nginx
spec:
containers:
- image: nginx
name: nginx
ports:
- containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
name: nginx
annotations:
external-dns.alpha.kubernetes.io/hostname: my-app.example.com.
spec:
selector:
app: nginx
type: LoadBalancer
ports:
- protocol: TCP
port: 80
targetPort: 80
```
Please note the annotation on the service. Use a hostname that belongs to the UltraDNS zone created above.
ExternalDNS uses this annotation to determine what services should be registered with DNS. Removing the annotation will cause ExternalDNS to remove the corresponding DNS records.
## Creating the Deployment and Service
```console
$ kubectl create -f nginx.yaml
$ kubectl create -f external-dns.yaml
```
Depending on where you run your service from, it can take a few minutes for your cloud provider to create an external IP for the service.
Once the service has an external IP assigned, ExternalDNS will notice the new service IP address and will synchronize the UltraDNS records.
## Verifying UltraDNS Records
Please verify on the [UltraDNS UI](https://portal.ultradns.neustar) that the records are created under the zone "example.com".
For more information on the UltraDNS UI, refer to https://docs.ultradns.neustar/mspuserguide.html.
Select the zone that was created above (or select the appropriate zone if a different zone was used.)
The external IP address will be displayed as a CNAME record for your zone.
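As an alternative to the UI, a quick command-line check of the record is possible once it has propagated (the hostname comes from the annotation in the example above):
```console
$ dig +short my-app.example.com CNAME
```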
## Cleaning Up the Deployment and Service
Now that we have verified that ExternalDNS will automatically manage your UltraDNS records, you can delete the example resources that you created in this tutorial:
```console
$ kubectl delete -f nginx.yaml
$ kubectl delete -f externaldns.yaml
```
## Examples to Manage your Records
### Creating an A Record with Multiple Targets
- First, you want to create a service file called 'apple-banana-echo.yaml'
```yaml
---
kind: Pod
apiVersion: v1
metadata:
name: example-app
labels:
app: apple
spec:
containers:
- name: example-app
image: hashicorp/http-echo
args:
- "-text=apple"
---
kind: Service
apiVersion: v1
metadata:
name: example-service
spec:
selector:
app: apple
ports:
- port: 5678 # Default port for image
```
- Then, create a service file called 'expose-apple-banana-app.yaml' to expose the services. For more information on deploying an ingress controller, refer to https://kubernetes.github.io/ingress-nginx/deploy/
```yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: example-ingress
annotations:
ingress.kubernetes.io/rewrite-target: /
ingress.kubernetes.io/scheme: internet-facing
external-dns.alpha.kubernetes.io/hostname: apple.example.com.
external-dns.alpha.kubernetes.io/target: 10.10.10.1,10.10.10.23
spec:
rules:
- http:
paths:
- path: /apple
backend:
serviceName: example-service
servicePort: 5678
```
- Then, create the deployment and service:
```console
$ kubectl create -f apple-banana-echo.yaml
$ kubectl create -f expose-apple-banana-app.yaml
$ kubectl create -f external-dns.yaml
```
- Depending on where you run your service from, it can take a few minutes for your cloud provider to create an external IP for the service.
- Please verify on the [UltraDNS UI](https://portal.ultradns.neustar) that the records have been created under the zone "example.com".
- Finally, you will need to clean up the deployment and service. Please verify on the UI afterwards that the records have been deleted from the zone example.com:
```console
$ kubectl delete -f apple-banana-echo.yaml
$ kubectl delete -f expose-apple-banana-app.yaml
$ kubectl delete -f external-dns.yaml
```
### Creating CNAME Record
- Please note that prior to deploying the external-dns service, you will need to add the option `--txt-prefix=txt-` to external-dns.yaml. If this is not provided, your records will not be created.
- First, create a service file called 'apple-banana-echo.yaml'
- _Config file example: Kubernetes cluster is on-premises, not on a cloud provider_
```yaml
---
kind: Pod
apiVersion: v1
metadata:
name: example-app
labels:
app: apple
spec:
containers:
- name: example-app
image: hashicorp/http-echo
args:
- "-text=apple"
---
kind: Service
apiVersion: v1
metadata:
name: example-service
spec:
selector:
app: apple
ports:
- port: 5678 # Default port for image
---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: example-ingress
annotations:
ingress.kubernetes.io/rewrite-target: /
ingress.kubernetes.io/scheme: internet-facing
external-dns.alpha.kubernetes.io/hostname: apple.example.com.
external-dns.alpha.kubernetes.io/target: apple.cname.com.
spec:
rules:
- http:
paths:
- path: /apple
backend:
serviceName: example-service
servicePort: 5678
```
- _Config file example: Kubernetes cluster service from different cloud vendors_
```yaml
---
kind: Pod
apiVersion: v1
metadata:
name: example-app
labels:
app: apple
spec:
containers:
- name: example-app
image: hashicorp/http-echo
args:
- "-text=apple"
---
kind: Service
apiVersion: v1
metadata:
name: example-service
annotations:
external-dns.alpha.kubernetes.io/hostname: my-app.example.com.
spec:
selector:
app: apple
type: LoadBalancer
ports:
- protocol: TCP
port: 5678
targetPort: 5678
```
- Then, create the deployment and service:
```console
$ kubectl create -f apple-banana-echo.yaml
$ kubectl create -f external-dns.yaml
```
- Depending on where you run your service from, it can take a few minutes for your cloud provider to create an external IP for the service.
- Please verify on the [UltraDNS UI](https://portal.ultradns.neustar), that the records have been created under the zone "example.com".
- Finally, you will need to clean up the deployment and service. Please verify on the UI afterwards that the records have been deleted from the zone "example.com":
```console
$ kubectl delete -f apple-banana-echo.yaml
$ kubectl delete -f external-dns.yaml
```
### Creating Multiple Types Of Records
- Please note that prior to deploying the external-dns service, you will need to add the option `--txt-prefix=txt-` to external-dns.yaml. Since you will also be creating a CNAME record, your records will not be created if this is not provided.
- First, create a service file called 'apple-banana-echo.yaml'
- _Config file example: Kubernetes cluster is on-premises, not on a cloud provider_
```yaml
---
kind: Pod
apiVersion: v1
metadata:
name: example-app
labels:
app: apple
spec:
containers:
- name: example-app
image: hashicorp/http-echo
args:
- "-text=apple"
---
kind: Service
apiVersion: v1
metadata:
name: example-service
spec:
selector:
app: apple
ports:
- port: 5678 # Default port for image
---
kind: Pod
apiVersion: v1
metadata:
name: example-app1
labels:
app: apple1
spec:
containers:
- name: example-app1
image: hashicorp/http-echo
args:
- "-text=apple"
---
kind: Service
apiVersion: v1
metadata:
name: example-service1
spec:
selector:
app: apple1
ports:
- port: 5679 # Default port for image
---
kind: Pod
apiVersion: v1
metadata:
name: example-app2
labels:
app: apple2
spec:
containers:
- name: example-app2
image: hashicorp/http-echo
args:
- "-text=apple"
---
kind: Service
apiVersion: v1
metadata:
name: example-service2
spec:
selector:
app: apple2
ports:
- port: 5680 # Default port for image
---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: example-ingress
annotations:
ingress.kubernetes.io/rewrite-target: /
ingress.kubernetes.io/scheme: internet-facing
external-dns.alpha.kubernetes.io/hostname: apple.example.com.
external-dns.alpha.kubernetes.io/target: apple.cname.com.
spec:
rules:
- http:
paths:
- path: /apple
backend:
serviceName: example-service
servicePort: 5678
---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: example-ingress1
annotations:
ingress.kubernetes.io/rewrite-target: /
ingress.kubernetes.io/scheme: internet-facing
external-dns.alpha.kubernetes.io/hostname: apple-banana.example.com.
external-dns.alpha.kubernetes.io/target: 10.10.10.3
spec:
rules:
- http:
paths:
- path: /apple
backend:
serviceName: example-service1
servicePort: 5679
---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: example-ingress2
annotations:
ingress.kubernetes.io/rewrite-target: /
ingress.kubernetes.io/scheme: internet-facing
external-dns.alpha.kubernetes.io/hostname: banana.example.com.
external-dns.alpha.kubernetes.io/target: 10.10.10.3,10.10.10.20
spec:
rules:
- http:
paths:
- path: /apple
backend:
serviceName: example-service2
servicePort: 5680
```
- _Config file example: Kubernetes cluster service from different cloud vendors_
```yaml
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx
spec:
selector:
matchLabels:
app: nginx
template:
metadata:
labels:
app: nginx
spec:
containers:
- image: nginx
name: nginx
ports:
- containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
name: nginx
annotations:
external-dns.alpha.kubernetes.io/hostname: my-app.example.com.
spec:
selector:
app: nginx
type: LoadBalancer
ports:
- protocol: TCP
port: 80
targetPort: 80
---
kind: Pod
apiVersion: v1
metadata:
name: example-app
labels:
app: apple
spec:
containers:
- name: example-app
image: hashicorp/http-echo
args:
- "-text=apple"
---
kind: Service
apiVersion: v1
metadata:
name: example-service
spec:
selector:
app: apple
ports:
- port: 5678 # Default port for image
---
kind: Pod
apiVersion: v1
metadata:
name: example-app1
labels:
app: apple1
spec:
containers:
- name: example-app1
image: hashicorp/http-echo
args:
- "-text=apple"
---
kind: Service
apiVersion: v1
metadata:
name: example-service1
spec:
selector:
app: apple1
ports:
- port: 5679 # Default port for image
---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: example-ingress
annotations:
ingress.kubernetes.io/rewrite-target: /
ingress.kubernetes.io/scheme: internet-facing
external-dns.alpha.kubernetes.io/hostname: apple.example.com.
external-dns.alpha.kubernetes.io/target: 10.10.10.3,10.10.10.25
spec:
rules:
- http:
paths:
- path: /apple
backend:
serviceName: example-service
servicePort: 5678
---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: example-ingress1
annotations:
ingress.kubernetes.io/rewrite-target: /
ingress.kubernetes.io/scheme: internet-facing
external-dns.alpha.kubernetes.io/hostname: apple-banana.example.com.
external-dns.alpha.kubernetes.io/target: 10.10.10.3
spec:
rules:
- http:
paths:
- path: /apple
backend:
serviceName: example-service1
servicePort: 5679
```
- Then, create the deployment and service:
```console
$ kubectl create -f apple-banana-echo.yaml
$ kubectl create -f external-dns.yaml
```
- Depending on where you run your service from, it can take a few minutes for your cloud provider to create an external IP for the service.
- Please verify on the [UltraDNS UI](https://portal.ultradns.neustar), that the records have been created under the zone "example.com".
- Finally, you will need to clean up the deployment and service. Please verify on the UI afterwards that the records have been deleted from the zone "example.com":
```console
$ kubectl delete -f apple-banana-echo.yaml
$ kubectl delete -f external-dns.yaml
```

View File

@ -66,7 +66,7 @@ spec:
spec:
containers:
- name: external-dns
image: registry.opensource.zalan.do/teapot/external-dns:latest
image: k8s.gcr.io/external-dns/external-dns:v0.7.3
args:
- --provider=vinyldns
- --source=service
@ -99,7 +99,7 @@ rules:
- apiGroups: [""]
resources: ["services","endpoints","pods"]
verbs: ["get","watch","list"]
- apiGroups: ["extensions"]
- apiGroups: ["extensions","networking.k8s.io"]
resources: ["ingresses"]
verbs: ["get","watch","list"]
- apiGroups: [""]
@ -137,7 +137,7 @@ spec:
serviceAccountName: external-dns
containers:
- name: external-dns
image: registry.opensource.zalan.do/teapot/external-dns:latest
image: k8s.gcr.io/external-dns/external-dns:v0.7.3
args:
- --provider=vinyldns
- --source=service

View File

@ -42,7 +42,7 @@ spec:
spec:
containers:
- name: external-dns
image: registry.opensource.zalan.do/teapot/external-dns:latest
image: k8s.gcr.io/external-dns/external-dns:v0.7.3
args:
- --source=service # ingress is also possible
- --domain-filter=example.com # (optional) limit to only example.com domains; change to match the zone created above.
@ -68,7 +68,7 @@ rules:
- apiGroups: [""]
resources: ["services","endpoints","pods"]
verbs: ["get","watch","list"]
- apiGroups: ["extensions"]
- apiGroups: ["extensions","networking.k8s.io"]
resources: ["ingresses"]
verbs: ["get","watch","list"]
- apiGroups: [""]
@ -106,7 +106,7 @@ spec:
serviceAccountName: external-dns
containers:
- name: external-dns
image: registry.opensource.zalan.do/teapot/external-dns:latest
image: k8s.gcr.io/external-dns/external-dns:v0.7.3
args:
- --source=service # ingress is also possible
- --domain-filter=example.com # (optional) limit to only example.com domains; change to match the zone created above.
@ -116,7 +116,7 @@ spec:
value: "YOU_VULTR_API_KEY"
```
## Deploying an Nginx Service
## Deploying a Nginx Service
Create a service file called 'nginx.yaml' with the following contents:

29
go.mod
View File

@ -5,11 +5,11 @@ go 1.14
require (
cloud.google.com/go v0.50.0
git.blindage.org/21h/hcloud-dns v0.0.0-20200525170043-def10a4a28e0
github.com/Azure/azure-sdk-for-go v36.0.0+incompatible
github.com/Azure/go-autorest/autorest v0.9.4
github.com/Azure/go-autorest/autorest/adal v0.8.3
github.com/Azure/go-autorest/autorest/azure/auth v0.0.0-00010101000000-000000000000
github.com/Azure/go-autorest/autorest/to v0.3.0
github.com/Azure/azure-sdk-for-go v45.1.0+incompatible
github.com/Azure/go-autorest/autorest v0.11.4
github.com/Azure/go-autorest/autorest/adal v0.9.2
github.com/Azure/go-autorest/autorest/azure/auth v0.5.1
github.com/Azure/go-autorest/autorest/to v0.4.0
github.com/akamai/AkamaiOPEN-edgegrid-golang v0.9.11
github.com/alecthomas/assert v0.0.0-20170929043011-405dbfeb8e38 // indirect
github.com/alecthomas/colour v0.1.0 // indirect
@ -23,13 +23,12 @@ require (
github.com/digitalocean/godo v1.36.0
github.com/dnsimple/dnsimple-go v0.60.0
github.com/exoscale/egoscale v0.18.1
github.com/fatih/structs v1.1.0 // indirect
github.com/ffledgling/pdns-go v0.0.0-20180219074714-524e7daccd99
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 // indirect
github.com/golang/sync v0.0.0-20180314180146-1d60e4601c6f
github.com/google/go-cmp v0.4.1
github.com/gophercloud/gophercloud v0.1.0
github.com/gorilla/mux v1.7.4 // indirect
github.com/hashicorp/golang-lru v0.5.4 // indirect
github.com/infobloxopen/infoblox-go-client v0.0.0-20180606155407-61dc5f9b0a65
github.com/linki/instrumented_http v0.2.0
github.com/linode/linodego v0.19.0
@ -45,10 +44,12 @@ require (
github.com/projectcontour/contour v1.5.0
github.com/prometheus/client_golang v1.7.1
github.com/sanyu/dynectsoap v0.0.0-20181203081243-b83de5edc4e0
github.com/sirupsen/logrus v1.4.2
github.com/smartystreets/gunit v1.3.4 // indirect
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.6.0.20200623155123-84df6c4b5301
github.com/sirupsen/logrus v1.6.0
github.com/stretchr/testify v1.5.1
github.com/terra-farm/udnssdk v1.3.5 // indirect
github.com/transip/gotransip v5.8.2+incompatible
github.com/ultradns/ultradns-sdk-go v0.0.0-20200616202852-e62052662f60
github.com/vinyldns/go-vinyldns v0.0.0-20200211145900-fe8a3d82e556
github.com/vultr/govultr v0.4.2
go.etcd.io/etcd v0.5.0-alpha.5.0.20200401174654-e694b7bb0875
@ -60,16 +61,12 @@ require (
gopkg.in/yaml.v2 v2.2.8
istio.io/api v0.0.0-20200529165953-72dad51d4ffc
istio.io/client-go v0.0.0-20200529172309-31c16ea3f751
k8s.io/api v0.18.3
k8s.io/apimachinery v0.18.3
k8s.io/client-go v0.18.3
k8s.io/api v0.18.8
k8s.io/apimachinery v0.18.8
k8s.io/client-go v0.18.8
)
replace (
github.com/Azure/go-autorest => github.com/Azure/go-autorest v13.0.1+incompatible
github.com/Azure/go-autorest/autorest => github.com/Azure/go-autorest/autorest v0.9.1
github.com/Azure/go-autorest/autorest/adal => github.com/Azure/go-autorest/autorest/adal v0.6.0
github.com/Azure/go-autorest/autorest/azure/auth => github.com/Azure/go-autorest/autorest/azure/auth v0.3.0
github.com/golang/glog => github.com/kubermatic/glog-logrus v0.0.0-20180829085450-3fa5b9870d1d
// TODO(jpg): Pin gRPC to work around breaking change until all dependences are upgraded: https://github.com/etcd-io/etcd/issues/11563
google.golang.org/grpc => google.golang.org/grpc v1.26.0

93
go.sum
View File

@ -15,28 +15,39 @@ code.cloudfoundry.org/gofileutils v0.0.0-20170111115228-4d0c80011a0f/go.mod h1:s
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
git.blindage.org/21h/hcloud-dns v0.0.0-20200525170043-def10a4a28e0 h1:kdxglEveTcqIG5zEPdQ0Y5KctnIGR7zXsQCQakoTNxU=
git.blindage.org/21h/hcloud-dns v0.0.0-20200525170043-def10a4a28e0/go.mod h1:n26Twiii5jhkMC+Ocz/s8R73cBBcXRIwyTqQ+6bOZGo=
github.com/Azure/azure-sdk-for-go v36.0.0+incompatible h1:XIaBmA4pgKqQ7jInQPaNJQ4pOHrdJjw9gYXhbyiChaU=
github.com/Azure/azure-sdk-for-go v36.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/azure-sdk-for-go v45.1.0+incompatible h1:kxtaPD8n2z5Za+9e3sKsYG2IX6PG2R6VXtgS7gAbh3A=
github.com/Azure/azure-sdk-for-go v45.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
github.com/Azure/go-autorest/autorest v0.9.1 h1:JB7Mqhna/7J8gZfVHjxDSTLSD6ciz2YgSMb/4qLXTtY=
github.com/Azure/go-autorest/autorest v0.9.1/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
github.com/Azure/go-autorest/autorest/adal v0.6.0 h1:UCTq22yE3RPgbU/8u4scfnnzuCW6pwQ9n+uBtV78ouo=
github.com/Azure/go-autorest/autorest/adal v0.6.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc=
github.com/Azure/go-autorest/autorest/azure/auth v0.3.0 h1:JwftqZDtWkr3qt1kcEgPd7H57uCHsXKXf66agWUQcGw=
github.com/Azure/go-autorest/autorest/azure/auth v0.3.0/go.mod h1:CI4BQYBct8NS7BXNBBX+RchsFsUu5+oz+OSyR/ZIi7U=
github.com/Azure/go-autorest/autorest/azure/cli v0.3.0 h1:5PAqnv+CSTwW9mlZWZAizmzrazFWEgZykEZXpr2hDtY=
github.com/Azure/go-autorest/autorest/azure/cli v0.3.0/go.mod h1:rNYMNAefZMRowqCV0cVhr/YDW5dD7afFq9nXAXL4ykE=
github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM=
github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g=
github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
github.com/Azure/go-autorest/autorest v0.11.0/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
github.com/Azure/go-autorest/autorest v0.11.4 h1:iWJqGEvip7mjibEqC/srXNdo+4wLEPiwlP/7dZLtoPc=
github.com/Azure/go-autorest/autorest v0.11.4/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg=
github.com/Azure/go-autorest/autorest/adal v0.9.2 h1:Aze/GQeAN1RRbGmnUJvUj+tFGBzFdIg3293/A9rbxC4=
github.com/Azure/go-autorest/autorest/adal v0.9.2/go.mod h1:/3SMAM86bP6wC9Ev35peQDUeqFZBMH07vvUOmg4z/fE=
github.com/Azure/go-autorest/autorest/azure/auth v0.5.1 h1:bvUhZciHydpBxBmCheUgxxbSwJy7xcfjkUsjUcqSojc=
github.com/Azure/go-autorest/autorest/azure/auth v0.5.1/go.mod h1:ea90/jvmnAwDrSooLH4sRIehEPtG/EPUXavDh31MnA4=
github.com/Azure/go-autorest/autorest/azure/cli v0.4.0 h1:Ml+UCrnlKD+cJmSzrZ/RDcDw86NjkRUpnFh7V5JUhzU=
github.com/Azure/go-autorest/autorest/azure/cli v0.4.0/go.mod h1:JljT387FplPzBA31vUcvsetLKF3pec5bdAxjVU4kI2s=
github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=
github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/autorest/mocks v0.3.0 h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc=
github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM=
github.com/Azure/go-autorest/autorest/to v0.3.0 h1:zebkZaadz7+wIQYgC7GXaz3Wb28yKYfVkkBKwc38VF8=
github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA=
github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY=
github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk=
github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk=
github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE=
github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k=
github.com/Azure/go-autorest/logger v0.2.0 h1:e4RVHVZKC5p6UANLJHkM4OfR1UKZPj8Wt8Pcx+3oqrE=
github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/Masterminds/semver v1.4.2 h1:WBLTQ37jOCzSLtXNdoo8bNM8876KhNqOKvrlGITgsTc=
@ -139,12 +150,15 @@ github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.5/go.mod h1:OXl5to++W0ctG+EHWTFUjiypVxC/Y4VLc/KFU+al13s=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/evanphx/json-patch v0.0.0-20200808040245-162e5629780b/go.mod h1:NAJj0yf/KaRKURN6nyi7A9IZydMivZEm9oQLWNjfKDc=
github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v4.5.0+incompatible h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M=
github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/exoscale/egoscale v0.18.1 h1:1FNZVk8jHUx0AvWhOZxLEDNlacTU0chMXUUNkm9EZaI=
github.com/exoscale/egoscale v0.18.1/go.mod h1:Z7OOdzzTOz1Q1PjQXumlz9Wn/CddH0zSYdCF3rnBKXE=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo=
github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
github.com/ffledgling/pdns-go v0.0.0-20180219074714-524e7daccd99 h1:jmwW6QWvUO2OPe22YfgFvBaaZlSr8Rlrac5lZvG6IdM=
github.com/ffledgling/pdns-go v0.0.0-20180219074714-524e7daccd99/go.mod h1:4mP9w9+vYGw2jUx2+2v03IA+phyQQjNRR4AL3uxlNrs=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
@ -226,9 +240,8 @@ github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP
github.com/goji/httpauth v0.0.0-20160601135302-2da839ab0f4d/go.mod h1:nnjvkQ9ptGaCkuDUx6wNykzzlUixGxvkme+H/lnzb+A=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20180513044358-24b0969c4cb7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef h1:veQD95Isof8w9/WXiA+pa3tz3fJXkt5B7QaRBrM62gk=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM39fZuwSd1LwSqqSW0hOdXCYYDX0R3I=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
@ -237,7 +250,6 @@ github.com/golang/protobuf v1.0.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.5 h1:F768QJ1E9tib+q5Sc8MkdJi1RxLTbRcTf8LJV56aRls=
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
@ -305,9 +317,8 @@ github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrj
github.com/hashicorp/go-retryablehttp v0.6.6 h1:HJunrbHTDDbBb/ay4kxa1n+dLmttUlnP3V9oNE4hmsM=
github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
@ -318,6 +329,7 @@ github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJ
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/infobloxopen/infoblox-go-client v0.0.0-20180606155407-61dc5f9b0a65 h1:FP5rOFP4ifbtFIjFHJmwhFrsbDyONILK/FNntl/Pou8=
github.com/infobloxopen/infoblox-go-client v0.0.0-20180606155407-61dc5f9b0a65/go.mod h1:BXiw7S2b9qJoM8MS40vfgCNB2NLHGusk1DtO16BD9zI=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmespath/go-jmespath v0.3.0 h1:OS12ieG61fsCg5+qLJ+SsW9NicxNkg3b25OyT2yCeUc=
github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik=
@ -339,8 +351,9 @@ github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvW
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
@ -419,8 +432,6 @@ github.com/openshift/api v0.0.0-20200605231317-fb2a6ca106ae/go.mod h1:l6TGeqJ92D
github.com/openshift/build-machinery-go v0.0.0-20200424080330-082bf86082cc/go.mod h1:1CkcsT3aVebzRBzVTSbiKSkJMsC/CASqxesfqEMfJEc=
github.com/openshift/client-go v0.0.0-20200608144219-584632b8fc73 h1:JePLt9EpNLF/30KsSsArrzxGWPaUIvYUt8Fwnw9wlgM=
github.com/openshift/client-go v0.0.0-20200608144219-584632b8fc73/go.mod h1:+66gk3dEqw9e+WoiXjJFzWlS1KGhj9ZRHi/RI/YG/ZM=
github.com/oracle/oci-go-sdk v1.8.0 h1:4SO45bKV0I3/Mn1os3ANDZmV0eSE5z5CLdSUIkxtyzs=
github.com/oracle/oci-go-sdk v1.8.0/go.mod h1:VQb79nF8Z2cwLkLS35ukwStZIg5F66tcBccjip/j888=
github.com/oracle/oci-go-sdk v21.4.0+incompatible h1:ORX+RXBuG/INBs+rgx6S3qoShEZ5+rwEEyRn2s6bPiw=
github.com/oracle/oci-go-sdk v21.4.0+incompatible/go.mod h1:VQb79nF8Z2cwLkLS35ukwStZIg5F66tcBccjip/j888=
github.com/ovh/go-ovh v0.0.0-20181109152953-ba5adb4cf014 h1:37VE5TYj2m/FLA9SNr4z0+A0JefvTmR60Zwf8XSEV7c=
@ -445,7 +456,6 @@ github.com/projectcontour/contour v1.5.0/go.mod h1:y1MEsorL/Q8lBG5BZz8Gzryi9L5ry
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.1.0 h1:BQ53HtBmfOitExawJ6LokA4x8ov/z0SYYb0+HxJfRI8=
github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
github.com/prometheus/client_golang v1.7.1 h1:NTGy1Ja9pByO+xAeH/qiWnLrKtr3hJPNjaVUwnjpdpA=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
@ -456,14 +466,12 @@ github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.6.0 h1:kRhiuYSXR3+uv2IbVbZhUxK5zVD/2pp3Gd2PpvPkpEo=
github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
github.com/prometheus/common v0.10.0 h1:RyRA7RzGXQZiW+tGMr7sxa85G1z0yOpM1qq5c8lNawc=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.3 h1:CTwfnzjQ+8dS6MhHHu4YswVAD99sL2wjPqP+VkURmKE=
github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
github.com/prometheus/procfs v0.1.3 h1:F0+tqvhOksq22sc6iCHF5WGlWjdwj92p0udFh1VFBS8=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
@ -474,15 +482,17 @@ github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sanyu/dynectsoap v0.0.0-20181203081243-b83de5edc4e0 h1:vOcHdR1nu7DO4BAx1rwzdHV7jQTzW3gqcBT5qxHSc6A=
github.com/sanyu/dynectsoap v0.0.0-20181203081243-b83de5edc4e0/go.mod h1:FeplEtXXejBYC4NPAFTrs5L7KuK+5RL9bf5nB2vZe9o=
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.6.0.20200623155123-84df6c4b5301 h1:qj0du14RIOnmePII/eTlw1aHKDYL6zxDIk/Dq7Tef9k=
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.6.0.20200623155123-84df6c4b5301/go.mod h1:CJJ5VAbozOl0yEw7nHB9+7BXTJbIn6h7W+f6Gau5IP8=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0=
github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/smartystreets/assertions v0.0.0-20180725160413-e900ae048470/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/assertions v1.0.1 h1:voD4ITNjPL5jjBfgR/r8fPIIBrliWrWHeiJApdr3r4w=
github.com/smartystreets/assertions v1.0.1/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM=
@ -492,9 +502,8 @@ github.com/smartystreets/goconvey v0.0.0-20180222194500-ef6db91d284a/go.mod h1:X
github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/smartystreets/gunit v1.0.4 h1:tpTjnuH7MLlqhoD21vRoMZbMIi5GmBsAJDFyF67GhZA=
github.com/smartystreets/gunit v1.0.4/go.mod h1:EH5qMBab2UclzXUcpR8b93eHsIlp9u+pDQIRp5DZNzQ=
github.com/smartystreets/gunit v1.3.4 h1:iHc8Rfhb/uCOc9a3KGuD3ut22L+hLIVaqR1o5fS6zC4=
github.com/smartystreets/gunit v1.3.4/go.mod h1:ZjM1ozSIMJlAz/ay4SG8PeKF00ckUp+zMHZXV9/bvak=
github.com/soheilhy/cmux v0.1.3/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
@ -520,12 +529,16 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/terra-farm/udnssdk v1.3.5 h1:MNR3adfuuEK/l04+jzo8WW/0fnorY+nW515qb3vEr6I=
github.com/terra-farm/udnssdk v1.3.5/go.mod h1:8RnM56yZTR7mYyUIvrDgXzdRaEyFIzqdEi7+um26Sv8=
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8 h1:ndzgwNDnKIqyCvHTXaCqh9KlOWKvBry6nuXMJmonVsE=
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/transip/gotransip v5.8.2+incompatible h1:aNJhw/w/3QBqFcHAIPz1ytoK5FexeMzbUCGrrhWr3H0=
github.com/transip/gotransip v5.8.2+incompatible/go.mod h1:uacMoJVmrfOcscM4Bi5NVg708b7c6rz2oDTWqa7i2Ic=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
github.com/ultradns/ultradns-sdk-go v0.0.0-20200616202852-e62052662f60 h1:n7unetnX8WWTc0U85h/0+dJoLWLqoaJwowXB9RkBdxU=
github.com/ultradns/ultradns-sdk-go v0.0.0-20200616202852-e62052662f60/go.mod h1:43vmy6GEvRuVMpGEWfJ/JoEM6RIqUQI1/tb8JqZR1zI=
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/cli v1.21.0/go.mod h1:lxDj6qX9Q6lWQxIrbrT0nwecwUtRnhVZAJjJZrVUZZQ=
github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw=
@ -569,15 +582,15 @@ golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnf
golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190418165655-df01cb2cc480/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975 h1:/Tl7pH94bvbAAHBdZJT947M/+gp0+CqQXDtMRC0fseo=
golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@ -652,7 +665,6 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -667,7 +679,6 @@ golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191105231009-c1f44814a5cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1 h1:ogLJMz+qpzav7lGMh10LMvAkM/fAoGlaiiHYiFYdm80=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -800,6 +811,8 @@ k8s.io/api v0.18.1/go.mod h1:3My4jorQWzSs5a+l7Ge6JBbIxChLnY8HnuT58ZWolss=
k8s.io/api v0.18.2/go.mod h1:SJCWI7OLzhZSvbY7U8zwNl9UA4o1fizoug34OV/2r78=
k8s.io/api v0.18.3 h1:2AJaUQdgUZLoDZHrun21PW2Nx9+ll6cUzvn3IKhSIn0=
k8s.io/api v0.18.3/go.mod h1:UOaMwERbqJMfeeeHc8XJKawj4P9TgDRnViIqqBeH2QA=
k8s.io/api v0.18.8 h1:aIKUzJPb96f3fKec2lxtY7acZC9gQNDLVhfSGpxBAC4=
k8s.io/api v0.18.8/go.mod h1:d/CXqwWv+Z2XEG1LgceeDmHQwpUJhROPx16SlxJgERY=
k8s.io/apiextensions-apiserver v0.0.0-20190918161926-8f644eb6e783/go.mod h1:xvae1SZB3E17UpV59AWc271W/Ph25N+bjPyR63X6tPY=
k8s.io/apiextensions-apiserver v0.17.0/go.mod h1:XiIFUakZywkUl54fVXa7QTEHcqQz9HG55nHd1DCoHj8=
k8s.io/apimachinery v0.0.0-20190913080033-27d36303b655/go.mod h1:nL6pwRT8NgfF8TT68DBI8uEePRt89cSvoXUVqbkWHq4=
@ -808,6 +821,8 @@ k8s.io/apimachinery v0.18.1/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftc
k8s.io/apimachinery v0.18.2/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA=
k8s.io/apimachinery v0.18.3 h1:pOGcbVAhxADgUYnjS08EFXs9QMl8qaH5U4fr5LGUrSk=
k8s.io/apimachinery v0.18.3/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko=
k8s.io/apimachinery v0.18.8 h1:jimPrycCqgx2QPearX3to1JePz7wSbVLq+7PdBTTwQ0=
k8s.io/apimachinery v0.18.8/go.mod h1:6sQd+iHEqmOtALqOFjSWp2KZ9F0wlU/nWm0ZgsYWMig=
k8s.io/apiserver v0.0.0-20190918160949-bfa5e2e684ad/go.mod h1:XPCXEwhjaFN29a8NldXA901ElnKeKLrLtREO9ZhFyhg=
k8s.io/apiserver v0.17.0/go.mod h1:ABM+9x/prjINN6iiffRVNCBR2Wk7uY4z+EtEGZD48cg=
k8s.io/client-go v0.0.0-20190918160344-1fbdaa4c8d90/go.mod h1:J69/JveO6XESwVgG53q3Uz5OSfgsv4uxpScmmyYOOlk=
@ -816,6 +831,10 @@ k8s.io/client-go v0.18.1/go.mod h1:iCikYRiXOj/yRRFE/aWqrpPtDt4P2JVWhtHkmESTcfY=
k8s.io/client-go v0.18.2/go.mod h1:Xcm5wVGXX9HAA2JJ2sSBUn3tCJ+4SVlCbl2MNNv+CIU=
k8s.io/client-go v0.18.3 h1:QaJzz92tsN67oorwzmoB0a9r9ZVHuD5ryjbCKP0U22k=
k8s.io/client-go v0.18.3/go.mod h1:4a/dpQEvzAhT1BbuWW09qvIaGw6Gbu1gZYiQZIi1DMw=
k8s.io/client-go v0.18.8 h1:SdbLpIxk5j5YbFr1b7fq8S7mDgDjYmUxSbszyoesoDM=
k8s.io/client-go v0.18.8/go.mod h1:HqFqMllQ5NnQJNwjro9k5zMyfhZlOwpuTLVrxjkYSxU=
k8s.io/code-generator v0.0.0-20190912054826-cd179ad6a269 h1:d8Fm55A+7HOczX58+x9x+nJnJ1Devt1aCrWVIPaw/Vg=
k8s.io/code-generator v0.0.0-20190912054826-cd179ad6a269/go.mod h1:V5BD6M4CyaN5m+VthcclXWsVcT1Hu+glwa1bi3MIsyE=
k8s.io/code-generator v0.17.0/go.mod h1:DVmfPQgxQENqDIzVR2ddLXMH34qeszkKSdH/N+s+38s=
k8s.io/code-generator v0.18.3/go.mod h1:TgNEVx9hCyPGpdtCWA34olQYLkh3ok9ar7XfSsr8b6c=

View File

@ -1,3 +1,19 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
// FastPoll used for fast testing

View File

@ -1,12 +1,28 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testutils
import (
"io/ioutil"
"log"
"os"
"log"
"github.com/sirupsen/logrus"
"sigs.k8s.io/external-dns/internal/config"
)

View File

@ -16,7 +16,7 @@ spec:
serviceAccountName: external-dns
containers:
- name: external-dns
image: us.gcr.io/k8s-artifacts-prod/external-dns/external-dns:v0.7.2
image: k8s.gcr.io/external-dns/external-dns:v0.7.3
args:
- --source=service
- --source=ingress

21
main.go
View File

@ -26,8 +26,14 @@ import (
"github.com/prometheus/client_golang/prometheus/promhttp"
log "github.com/sirupsen/logrus"
_ "k8s.io/client-go/plugin/pkg/client/auth"
"sigs.k8s.io/external-dns/controller"
"sigs.k8s.io/external-dns/endpoint"
"sigs.k8s.io/external-dns/pkg/apis/externaldns"
"sigs.k8s.io/external-dns/pkg/apis/externaldns/validation"
"sigs.k8s.io/external-dns/plan"
"sigs.k8s.io/external-dns/provider"
"sigs.k8s.io/external-dns/provider/akamai"
"sigs.k8s.io/external-dns/provider/alibabacloud"
"sigs.k8s.io/external-dns/provider/aws"
@ -52,16 +58,11 @@ import (
"sigs.k8s.io/external-dns/provider/rcode0"
"sigs.k8s.io/external-dns/provider/rdns"
"sigs.k8s.io/external-dns/provider/rfc2136"
"sigs.k8s.io/external-dns/provider/scaleway"
"sigs.k8s.io/external-dns/provider/transip"
"sigs.k8s.io/external-dns/provider/ultradns"
"sigs.k8s.io/external-dns/provider/vinyldns"
"sigs.k8s.io/external-dns/provider/vultr"
"sigs.k8s.io/external-dns/controller"
"sigs.k8s.io/external-dns/endpoint"
"sigs.k8s.io/external-dns/pkg/apis/externaldns"
"sigs.k8s.io/external-dns/pkg/apis/externaldns/validation"
"sigs.k8s.io/external-dns/plan"
"sigs.k8s.io/external-dns/provider"
"sigs.k8s.io/external-dns/registry"
"sigs.k8s.io/external-dns/source"
)
@ -191,6 +192,8 @@ func main() {
p, err = vinyldns.NewVinylDNSProvider(domainFilter, zoneIDFilter, cfg.DryRun)
case "vultr":
p, err = vultr.NewVultrProvider(domainFilter, cfg.DryRun)
case "ultradns":
p, err = ultradns.NewUltraDNSProvider(domainFilter, cfg.DryRun)
case "cloudflare":
p, err = cloudflare.NewCloudFlareProvider(domainFilter, zoneIDFilter, cfg.CloudflareZonesPerPage, cfg.CloudflareProxied, cfg.DryRun)
case "rcodezero":
@ -287,6 +290,8 @@ func main() {
)
case "transip":
p, err = transip.NewTransIPProvider(cfg.TransIPAccountName, cfg.TransIPPrivateKeyFile, domainFilter, cfg.DryRun)
case "scaleway":
p, err = scaleway.NewScalewayProvider(ctx, domainFilter, cfg.DryRun)
default:
log.Fatalf("unknown dns provider: %s", cfg.Provider)
}

View File

@ -24,6 +24,7 @@ import (
"github.com/alecthomas/kingpin"
"github.com/sirupsen/logrus"
"sigs.k8s.io/external-dns/source"
)
@ -41,7 +42,6 @@ type Config struct {
APIServerURL string
KubeConfig string
RequestTimeout time.Duration
IstioIngressGatewayServices []string
ContourLoadBalancerService string
SkipperRouteGroupVersion string
Sources []string
@ -319,7 +319,7 @@ func (cfg *Config) ParseFlags(args []string) error {
app.Flag("service-type-filter", "The service types to take care about (default: all, expected: ClusterIP, NodePort, LoadBalancer or ExternalName)").StringsVar(&cfg.ServiceTypeFilter)
// Flags related to providers
app.Flag("provider", "The DNS provider where the DNS records will be created (required, options: aws, aws-sd, google, azure, azure-dns, azure-private-dns, cloudflare, rcodezero, digitalocean, hetzner, dnsimple, akamai, infoblox, dyn, designate, coredns, skydns, inmemory, ovh, pdns, oci, exoscale, linode, rfc2136, ns1, transip, vinyldns, rdns, vultr)").Required().PlaceHolder("provider").EnumVar(&cfg.Provider, "aws", "aws-sd", "google", "azure", "azure-dns", "hetzner", "azure-private-dns", "alibabacloud", "cloudflare", "rcodezero", "digitalocean", "dnsimple", "akamai", "infoblox", "dyn", "designate", "coredns", "skydns", "inmemory", "ovh", "pdns", "oci", "exoscale", "linode", "rfc2136", "ns1", "transip", "vinyldns", "rdns", "vultr")
app.Flag("provider", "The DNS provider where the DNS records will be created (required, options: aws, aws-sd, google, azure, azure-dns, azure-private-dns, cloudflare, rcodezero, digitalocean, hetzner, dnsimple, akamai, infoblox, dyn, designate, coredns, skydns, inmemory, ovh, pdns, oci, exoscale, linode, rfc2136, ns1, transip, vinyldns, rdns, scaleway, vultr, ultradns)").Required().PlaceHolder("provider").EnumVar(&cfg.Provider, "aws", "aws-sd", "google", "azure", "azure-dns", "hetzner", "azure-private-dns", "alibabacloud", "cloudflare", "rcodezero", "digitalocean", "dnsimple", "akamai", "infoblox", "dyn", "designate", "coredns", "skydns", "inmemory", "ovh", "pdns", "oci", "exoscale", "linode", "rfc2136", "ns1", "transip", "vinyldns", "rdns", "scaleway", "vultr", "ultradns")
app.Flag("domain-filter", "Limit possible target zones by a domain suffix; specify multiple times for multiple domains (optional)").Default("").StringsVar(&cfg.DomainFilter)
app.Flag("exclude-domains", "Exclude subdomains (optional)").Default("").StringsVar(&cfg.ExcludeDomains)
app.Flag("zone-id-filter", "Filter target zones by hosted zone id; specify multiple times for multiple zones (optional)").Default("").StringsVar(&cfg.ZoneIDFilter)

View File

@ -28,6 +28,7 @@ import (
c "github.com/akamai/AkamaiOPEN-edgegrid-golang/client-v1"
"github.com/akamai/AkamaiOPEN-edgegrid-golang/edgegrid"
log "github.com/sirupsen/logrus"
"sigs.k8s.io/external-dns/endpoint"
"sigs.k8s.io/external-dns/plan"
"sigs.k8s.io/external-dns/provider"

View File

@ -326,10 +326,6 @@ func (p *AlibabaCloudProvider) recordsForDNS() (endpoints []*endpoint.Endpoint,
recordType := recordList[0].Type
ttl := recordList[0].TTL
if ttl == defaultAlibabaCloudRecordTTL {
ttl = 0
}
var targets []string
for _, record := range recordList {
target := record.Value

View File

@ -276,6 +276,12 @@ func TestAlibabaCloudProvider_Records(t *testing.T) {
func TestAlibabaCloudProvider_ApplyChanges(t *testing.T) {
p := newTestAlibabaCloudProvider(false)
defaultTtlPlan := &endpoint.Endpoint{
DNSName: "ttl.container-service.top",
RecordType: "A",
RecordTTL: defaultAlibabaCloudRecordTTL,
Targets: endpoint.NewTargets("4.3.2.1"),
}
changes := plan.Changes{
Create: []*endpoint.Endpoint{
{
@ -284,6 +290,7 @@ func TestAlibabaCloudProvider_ApplyChanges(t *testing.T) {
RecordTTL: 300,
Targets: endpoint.NewTargets("4.3.2.1"),
},
defaultTtlPlan,
},
UpdateNew: []*endpoint.Endpoint{
{
@ -308,13 +315,20 @@ func TestAlibabaCloudProvider_ApplyChanges(t *testing.T) {
if err != nil {
t.Errorf("Failed to get records: %v", err)
} else {
if len(endpoints) != 2 {
if len(endpoints) != 3 {
t.Errorf("Incorrect number of records: %d", len(endpoints))
}
for _, endpoint := range endpoints {
t.Logf("Endpoint for %++v", *endpoint)
}
}
for _, ep := range endpoints {
if ep.DNSName == defaultTtlPlan.DNSName {
if ep.RecordTTL != defaultTtlPlan.RecordTTL {
t.Error("default ttl execute error")
}
}
}
}
func TestAlibabaCloudProvider_Records_PrivateZone(t *testing.T) {

View File

@ -30,6 +30,7 @@ import (
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/route53"
"github.com/linki/instrumented_http"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"sigs.k8s.io/external-dns/endpoint"
@ -101,6 +102,8 @@ var (
"elb.us-gov-west-1.amazonaws.com": "ZMG1MZ2THAWF1",
"elb.us-gov-east-1.amazonaws.com": "Z1ZSMQQ6Q24QQ8",
"elb.me-south-1.amazonaws.com": "Z3QSRYVP46NYYV",
// Global Accelerator
"awsglobalaccelerator.com": "Z2BJ6XQ5FK7U4H",
}
)
@ -166,7 +169,7 @@ func NewAWSProvider(awsConfig AWSConfig) (*AWSProvider, error) {
SharedConfigState: session.SharedConfigEnable,
})
if err != nil {
return nil, err
return nil, errors.Wrap(err, "failed to instantiate AWS session")
}
if awsConfig.AssumeRole != "" {
@ -229,10 +232,10 @@ func (p *AWSProvider) Zones(ctx context.Context) (map[string]*route53.HostedZone
err := p.client.ListHostedZonesPagesWithContext(ctx, &route53.ListHostedZonesInput{}, f)
if err != nil {
return nil, err
return nil, errors.Wrap(err, "failed to list hosted zones")
}
if tagErr != nil {
return nil, tagErr
return nil, errors.Wrap(tagErr, "failed to list zones tags")
}
for _, zone := range zones {
@ -255,7 +258,7 @@ func wildcardUnescape(s string) string {
func (p *AWSProvider) Records(ctx context.Context) (endpoints []*endpoint.Endpoint, _ error) {
zones, err := p.Zones(ctx)
if err != nil {
return nil, err
return nil, errors.Wrap(err, "records retrieval failed")
}
return p.records(ctx, zones)
@ -339,7 +342,7 @@ func (p *AWSProvider) records(ctx context.Context, zones map[string]*route53.Hos
}
if err := p.client.ListResourceRecordSetsPagesWithContext(ctx, params, f); err != nil {
return nil, err
return nil, errors.Wrapf(err, "failed to list resource records sets for zone %s", *z.Id)
}
}
@ -364,12 +367,12 @@ func (p *AWSProvider) DeleteRecords(ctx context.Context, endpoints []*endpoint.E
func (p *AWSProvider) doRecords(ctx context.Context, action string, endpoints []*endpoint.Endpoint) error {
zones, err := p.Zones(ctx)
if err != nil {
return err
return errors.Wrapf(err, "failed to list zones, aborting %s doRecords action", action)
}
records, err := p.records(ctx, zones)
if err != nil {
log.Errorf("getting records failed: %v", err)
log.Errorf("failed to list records while preparing %s doRecords action: %s", action, err)
}
return p.submitChanges(ctx, p.newChanges(action, endpoints, records, zones), zones)
}
@ -378,7 +381,7 @@ func (p *AWSProvider) doRecords(ctx context.Context, action string, endpoints []
func (p *AWSProvider) ApplyChanges(ctx context.Context, changes *plan.Changes) error {
zones, err := p.Zones(ctx)
if err != nil {
return err
return errors.Wrap(err, "failed to list zones, not applying changes")
}
records, ok := ctx.Value(provider.RecordsContextKey).([]*endpoint.Endpoint)
@ -386,7 +389,7 @@ func (p *AWSProvider) ApplyChanges(ctx context.Context, changes *plan.Changes) e
var err error
records, err = p.records(ctx, zones)
if err != nil {
log.Errorf("getting records failed: %v", err)
log.Errorf("failed to get records while preparing to applying changes: %s", err)
}
}
@ -453,7 +456,7 @@ func (p *AWSProvider) submitChanges(ctx context.Context, changes []*route53.Chan
}
if len(failedZones) > 0 {
return fmt.Errorf("failed to submit all changes for the following zones: %v", failedZones)
return errors.Errorf("failed to submit all changes for the following zones: %v", failedZones)
}
return nil
@ -581,7 +584,7 @@ func (p *AWSProvider) tagsForZone(ctx context.Context, zoneID string) (map[strin
ResourceId: aws.String(zoneID),
})
if err != nil {
return nil, err
return nil, errors.Wrapf(err, "failed to list tags for zone %s", zoneID)
}
tagMap := map[string]string{}
for _, tag := range response.ResourceTagSet.Tags {

View File

@ -65,9 +65,14 @@ func NewAzurePrivateDNSProvider(domainFilter endpoint.DomainFilter, zoneIDFilter
return nil, err
}
zonesClient := privatedns.NewPrivateZonesClient(subscriptionID)
settings, err := auth.GetSettingsFromEnvironment()
if err != nil {
return nil, err
}
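// Build both clients against the resource manager endpoint of the cloud selected
// through the environment (auth.EnvironmentName), so sovereign clouds such as
// AzureChinaCloud or AzureUSGovernmentCloud work as well as AzurePublicCloud.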
zonesClient := privatedns.NewPrivateZonesClientWithBaseURI(settings.Environment.ResourceManagerEndpoint, subscriptionID)
zonesClient.Authorizer = authorizer
recordSetsClient := privatedns.NewRecordSetsClient(subscriptionID)
recordSetsClient := privatedns.NewRecordSetsClientWithBaseURI(settings.Environment.ResourceManagerEndpoint, subscriptionID)
recordSetsClient.Authorizer = authorizer
provider := &AzurePrivateDNSProvider{

View File

@ -18,12 +18,15 @@ package azure
import (
"context"
"os"
"testing"
"github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/azure/auth"
"github.com/Azure/go-autorest/autorest/to"
"github.com/stretchr/testify/assert"
"sigs.k8s.io/external-dns/endpoint"
"sigs.k8s.io/external-dns/plan"
@ -252,6 +255,36 @@ func newAzurePrivateDNSProvider(domainFilter endpoint.DomainFilter, zoneIDFilter
}
}
func validateAzurePrivateDNSClientsResourceManager(t *testing.T, environmentName string, expectedResourceManagerEndpoint string) {
err := os.Setenv(auth.EnvironmentName, environmentName)
if err != nil {
t.Fatal(err)
}
azurePrivateDNSProvider, err := NewAzurePrivateDNSProvider(endpoint.NewDomainFilter([]string{"example.com"}), provider.NewZoneIDFilter([]string{""}), "k8s", "sub", true)
if err != nil {
t.Fatal(err)
}
zonesClientBaseURI := azurePrivateDNSProvider.zonesClient.(privatedns.PrivateZonesClient).BaseURI
recordSetsClientBaseURI := azurePrivateDNSProvider.recordSetsClient.(privatedns.RecordSetsClient).BaseURI
assert.Equal(t, zonesClientBaseURI, expectedResourceManagerEndpoint, "expected and actual resource manager endpoints don't match. expected: %s, got: %s", expectedResourceManagerEndpoint, zonesClientBaseURI)
assert.Equal(t, recordSetsClientBaseURI, expectedResourceManagerEndpoint, "expected and actual resource manager endpoints don't match. expected: %s, got: %s", expectedResourceManagerEndpoint, recordSetsClientBaseURI)
}
func TestNewAzurePrivateDNSProvider(t *testing.T) {
// make sure to reset the environment variables at the end again
originalEnv := os.Getenv(auth.EnvironmentName)
defer os.Setenv(auth.EnvironmentName, originalEnv)
validateAzurePrivateDNSClientsResourceManager(t, "", azure.PublicCloud.ResourceManagerEndpoint)
validateAzurePrivateDNSClientsResourceManager(t, "AZURECHINACLOUD", azure.ChinaCloud.ResourceManagerEndpoint)
validateAzurePrivateDNSClientsResourceManager(t, "AZUREGERMANCLOUD", azure.GermanCloud.ResourceManagerEndpoint)
validateAzurePrivateDNSClientsResourceManager(t, "AZUREUSGOVERNMENTCLOUD", azure.USGovernmentCloud.ResourceManagerEndpoint)
}
func TestAzurePrivateDNSRecord(t *testing.T) {
provider, err := newMockedAzurePrivateDNSProvider(endpoint.NewDomainFilter([]string{"example.com"}), provider.NewZoneIDFilter([]string{""}), true, "k8s",
&[]privatedns.PrivateZone{

View File

@ -21,6 +21,7 @@ import (
hclouddns "git.blindage.org/21h/hcloud-dns"
log "github.com/sirupsen/logrus"
"sigs.k8s.io/external-dns/endpoint"
"sigs.k8s.io/external-dns/plan"
"sigs.k8s.io/external-dns/provider"

View File

@ -0,0 +1,29 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scaleway
import (
domain "github.com/scaleway/scaleway-sdk-go/api/domain/v2alpha2"
"github.com/scaleway/scaleway-sdk-go/scw"
)
// DomainAPI is an interface matching the domain.API struct
type DomainAPI interface {
ListDNSZones(req *domain.ListDNSZonesRequest, opts ...scw.RequestOption) (*domain.ListDNSZonesResponse, error)
ListDNSZoneRecords(req *domain.ListDNSZoneRecordsRequest, opts ...scw.RequestOption) (*domain.ListDNSZoneRecordsResponse, error)
UpdateDNSZoneRecords(req *domain.UpdateDNSZoneRecordsRequest, opts ...scw.RequestOption) (*domain.UpdateDNSZoneRecordsResponse, error)
}

View File

@ -0,0 +1,337 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scaleway
import (
"context"
"fmt"
"strconv"
"strings"
domain "github.com/scaleway/scaleway-sdk-go/api/domain/v2alpha2"
"github.com/scaleway/scaleway-sdk-go/scw"
log "github.com/sirupsen/logrus"
"sigs.k8s.io/external-dns/endpoint"
"sigs.k8s.io/external-dns/pkg/apis/externaldns"
"sigs.k8s.io/external-dns/plan"
"sigs.k8s.io/external-dns/provider"
)
const (
scalewayRecordTTL uint32 = 300
scalewayDefaultPriority uint32 = 0
scalewayPriorityKey string = "scw/priority"
)
// ScalewayProvider implements the DNS provider for Scaleway DNS
type ScalewayProvider struct {
provider.BaseProvider
domainAPI DomainAPI
dryRun bool
// only consider hosted zones managing domains ending in this suffix
domainFilter endpoint.DomainFilter
}
// ScalewayChange differentiates between change actions
type ScalewayChange struct {
Action string
Record []domain.Record
}
// NewScalewayProvider initializes a new Scaleway DNS provider
func NewScalewayProvider(ctx context.Context, domainFilter endpoint.DomainFilter, dryRun bool) (*ScalewayProvider, error) {
scwClient, err := scw.NewClient(
scw.WithEnv(),
scw.WithUserAgent("ExternalDNS/"+externaldns.Version),
)
if err != nil {
return nil, err
}
if _, ok := scwClient.GetDefaultOrganizationID(); !ok {
return nil, fmt.Errorf("default organization is not set")
}
if _, ok := scwClient.GetAccessKey(); !ok {
return nil, fmt.Errorf("access key no set")
}
if _, ok := scwClient.GetSecretKey(); !ok {
return nil, fmt.Errorf("secret key no set")
}
domainAPI := domain.NewAPI(scwClient)
return &ScalewayProvider{
domainAPI: domainAPI,
dryRun: dryRun,
domainFilter: domainFilter,
}, nil
}
// Zones returns the list of hosted zones.
func (p *ScalewayProvider) Zones(ctx context.Context) ([]*domain.DNSZone, error) {
res := []*domain.DNSZone{}
dnsZones, err := p.domainAPI.ListDNSZones(&domain.ListDNSZonesRequest{}, scw.WithAllPages(), scw.WithContext(ctx))
if err != nil {
return nil, err
}
for _, dnsZone := range dnsZones.DNSZones {
if p.domainFilter.Match(getCompleteZoneName(dnsZone)) {
res = append(res, dnsZone)
}
}
return res, nil
}
// Records returns the list of records in a given zone.
func (p *ScalewayProvider) Records(ctx context.Context) ([]*endpoint.Endpoint, error) {
endpoints := map[string]*endpoint.Endpoint{}
dnsZones, err := p.Zones(ctx)
if err != nil {
return nil, err
}
for _, zone := range dnsZones {
recordsResp, err := p.domainAPI.ListDNSZoneRecords(&domain.ListDNSZoneRecordsRequest{
DNSZone: getCompleteZoneName(zone),
}, scw.WithAllPages())
if err != nil {
return nil, err
}
for _, record := range recordsResp.Records {
name := record.Name + "."
// trim any leading or ending dot
fullRecordName := strings.Trim(name+getCompleteZoneName(zone), ".")
if !provider.SupportedRecordType(record.Type.String()) {
log.Infof("Skipping record %s because type %s is not supported", fullRecordName, record.Type.String())
continue
}
// in ExternalDNS, all targets of the same endpoint share the same TTL and priority.
// That is not the case in Scaleway DNS. It should never happen, but if
// a record is modified without going through ExternalDNS, we could have
// different priorities or TTLs for the same name.
// In that case, we just take the first one.
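// For example, two existing A records named "two" with data 1.1.1.2 and 1.1.1.3
// are folded into a single endpoint with targets [1.1.1.2, 1.1.1.3], keeping the
// TTL and priority of the first record returned by the API.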
if existingEndpoint, ok := endpoints[record.Type.String()+"/"+fullRecordName]; ok {
existingEndpoint.Targets = append(existingEndpoint.Targets, record.Data)
log.Infof("Appending target %s to record %s, using TTL and priotiry of target %s", record.Data, fullRecordName, existingEndpoint.Targets[0])
} else {
ep := endpoint.NewEndpointWithTTL(fullRecordName, record.Type.String(), endpoint.TTL(record.TTL), record.Data)
ep = ep.WithProviderSpecific(scalewayPriorityKey, fmt.Sprintf("%d", record.Priority))
endpoints[record.Type.String()+"/"+fullRecordName] = ep
}
}
}
returnedEndpoints := []*endpoint.Endpoint{}
for _, ep := range endpoints {
returnedEndpoints = append(returnedEndpoints, ep)
}
return returnedEndpoints, nil
}
// ApplyChanges applies a set of changes in a zone.
func (p *ScalewayProvider) ApplyChanges(ctx context.Context, changes *plan.Changes) error {
requests, err := p.generateApplyRequests(ctx, changes)
if err != nil {
return err
}
for _, req := range requests {
logChanges(req)
if p.dryRun {
log.Info("Running in dry run mode")
continue
}
_, err := p.domainAPI.UpdateDNSZoneRecords(req, scw.WithContext(ctx))
if err != nil {
return err
}
}
return nil
}
func (p *ScalewayProvider) generateApplyRequests(ctx context.Context, changes *plan.Changes) ([]*domain.UpdateDNSZoneRecordsRequest, error) {
returnedRequests := []*domain.UpdateDNSZoneRecordsRequest{}
recordsToAdd := map[string]*domain.RecordChangeAdd{}
recordsToDelete := map[string][]*domain.RecordChange{}
dnsZones, err := p.Zones(ctx)
if err != nil {
return nil, err
}
zoneNameMapper := provider.ZoneIDName{}
for _, zone := range dnsZones {
zoneName := getCompleteZoneName(zone)
zoneNameMapper.Add(zoneName, zoneName)
recordsToAdd[zoneName] = &domain.RecordChangeAdd{
Records: []*domain.Record{},
}
recordsToDelete[zoneName] = []*domain.RecordChange{}
}
for _, c := range changes.UpdateOld {
zone, _ := zoneNameMapper.FindZone(c.DNSName)
if zone == "" {
log.Infof("Ignore record %s since it's not handled by ExternalDNS", c.DNSName)
continue
}
recordsToDelete[zone] = append(recordsToDelete[zone], endpointToScalewayRecordsChangeDelete(zone, c)...)
}
for _, c := range changes.Delete {
zone, _ := zoneNameMapper.FindZone(c.DNSName)
if zone == "" {
log.Infof("Ignore record %s since it's not handled by ExternalDNS", c.DNSName)
continue
}
recordsToDelete[zone] = append(recordsToDelete[zone], endpointToScalewayRecordsChangeDelete(zone, c)...)
}
for _, c := range changes.Create {
zone, _ := zoneNameMapper.FindZone(c.DNSName)
if zone == "" {
log.Infof("Ignore record %s since it's not handled by ExternalDNS", c.DNSName)
continue
}
recordsToAdd[zone].Records = append(recordsToAdd[zone].Records, endpointToScalewayRecords(zone, c)...)
}
for _, c := range changes.UpdateNew {
zone, _ := zoneNameMapper.FindZone(c.DNSName)
if zone == "" {
log.Infof("Ignore record %s since it's not handled by ExternalDNS", c.DNSName)
continue
}
recordsToAdd[zone].Records = append(recordsToAdd[zone].Records, endpointToScalewayRecords(zone, c)...)
}
for _, zone := range dnsZones {
zoneName := getCompleteZoneName(zone)
req := &domain.UpdateDNSZoneRecordsRequest{
DNSZone: zoneName,
Changes: recordsToDelete[zoneName],
}
req.Changes = append(req.Changes, &domain.RecordChange{
Add: recordsToAdd[zoneName],
})
returnedRequests = append(returnedRequests, req)
}
return returnedRequests, nil
}
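// getCompleteZoneName returns the fully qualified zone name: a zone with
// Domain "example.com" and Subdomain "test" yields "test.example.com", while a
// root zone with an empty Subdomain yields just "example.com".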
func getCompleteZoneName(zone *domain.DNSZone) string {
subdomain := zone.Subdomain + "."
if zone.Subdomain == "" {
subdomain = ""
}
return subdomain + zone.Domain
}
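// endpointToScalewayRecords converts one ExternalDNS endpoint into Scaleway
// records: the zone suffix is trimmed from the DNS name (e.g. "www.test.example.com"
// in zone "test.example.com" becomes "www"), the default TTL of 300 is used when
// none is configured, and the scw/priority provider-specific property, if present,
// sets the record priority.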
func endpointToScalewayRecords(zoneName string, ep *endpoint.Endpoint) []*domain.Record {
// no annotation results in a TTL of 0, default to 300 for consistency with other providers
var ttl = scalewayRecordTTL
if ep.RecordTTL.IsConfigured() {
ttl = uint32(ep.RecordTTL)
}
var priority = scalewayDefaultPriority
if prop, ok := ep.GetProviderSpecificProperty(scalewayPriorityKey); ok {
prio, err := strconv.ParseUint(prop.Value, 10, 64)
if err != nil {
log.Errorf("Failed parsing value of %s: %s: %v; using priority of %d", scalewayPriorityKey, prop.Value, err, scalewayDefaultPriority)
} else {
priority = uint32(prio)
}
}
records := []*domain.Record{}
for _, target := range ep.Targets {
records = append(records, &domain.Record{
Data: target,
Name: strings.Trim(strings.TrimSuffix(ep.DNSName, zoneName), ". "),
Priority: priority,
TTL: ttl,
Type: domain.RecordType(ep.RecordType),
})
}
return records
}
func endpointToScalewayRecordsChangeDelete(zoneName string, ep *endpoint.Endpoint) []*domain.RecordChange {
records := []*domain.RecordChange{}
for _, target := range ep.Targets {
records = append(records, &domain.RecordChange{
Delete: &domain.RecordChangeDelete{
Data: target,
Name: strings.Trim(strings.TrimSuffix(ep.DNSName, zoneName), ". "),
Type: domain.RecordType(ep.RecordType),
},
})
}
return records
}
func logChanges(req *domain.UpdateDNSZoneRecordsRequest) {
log.Infof("Updating zone %s", req.DNSZone)
if !log.IsLevelEnabled(log.InfoLevel) {
return
}
for _, change := range req.Changes {
if change.Add != nil {
for _, add := range change.Add.Records {
name := add.Name + "."
if add.Name == "" {
name = ""
}
logFields := log.Fields{
"record": name + req.DNSZone,
"type": add.Type.String(),
"ttl": add.TTL,
"priority": add.Priority,
"data": add.Data,
}
log.WithFields(logFields).Info("Adding record")
}
} else if change.Delete != nil {
name := change.Delete.Name + "."
if change.Delete.Name == "" {
name = ""
}
logFields := log.Fields{
"record": name + req.DNSZone,
"type": change.Delete.Type.String(),
"data": change.Delete.Data,
}
log.WithFields(logFields).Info("Deleting record")
}
}
}
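For orientation, a minimal usage sketch of the new provider, not part of this commit; it assumes the credentials read by scw.WithEnv() (the variables named by scw.ScwAccessKeyEnv, scw.ScwSecretKeyEnv and scw.ScwDefaultOrganizationIDEnv) are exported and that a zone matching the filter exists:

package main

import (
    "context"

    log "github.com/sirupsen/logrus"

    "sigs.k8s.io/external-dns/endpoint"
    "sigs.k8s.io/external-dns/provider/scaleway"
)

func main() {
    ctx := context.Background()
    // dryRun=true: ApplyChanges would only log, never call UpdateDNSZoneRecords.
    p, err := scaleway.NewScalewayProvider(ctx, endpoint.NewDomainFilter([]string{"example.com"}), true)
    if err != nil {
        log.Fatalf("failed to create Scaleway provider: %v", err)
    }
    records, err := p.Records(ctx)
    if err != nil {
        log.Fatalf("failed to list records: %v", err)
    }
    for _, r := range records {
        log.Infof("%s %s -> %v", r.DNSName, r.RecordType, r.Targets)
    }
}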

View File

@ -0,0 +1,517 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scaleway
import (
"context"
"os"
"reflect"
"testing"
domain "github.com/scaleway/scaleway-sdk-go/api/domain/v2alpha2"
"github.com/scaleway/scaleway-sdk-go/scw"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"sigs.k8s.io/external-dns/endpoint"
"sigs.k8s.io/external-dns/plan"
)
type mockScalewayDomain struct {
*domain.API
}
func (m *mockScalewayDomain) ListDNSZones(req *domain.ListDNSZonesRequest, opts ...scw.RequestOption) (*domain.ListDNSZonesResponse, error) {
return &domain.ListDNSZonesResponse{
DNSZones: []*domain.DNSZone{
{
Domain: "example.com",
Subdomain: "",
},
{
Domain: "example.com",
Subdomain: "test",
},
{
Domain: "dummy.me",
Subdomain: "",
},
{
Domain: "dummy.me",
Subdomain: "test",
},
},
}, nil
}
func (m *mockScalewayDomain) ListDNSZoneRecords(req *domain.ListDNSZoneRecordsRequest, opts ...scw.RequestOption) (*domain.ListDNSZoneRecordsResponse, error) {
records := []*domain.Record{}
if req.DNSZone == "example.com" {
records = []*domain.Record{
{
Data: "1.1.1.1",
Name: "one",
TTL: 300,
Priority: 0,
Type: domain.RecordTypeA,
},
{
Data: "1.1.1.2",
Name: "two",
TTL: 300,
Priority: 0,
Type: domain.RecordTypeA,
},
{
Data: "1.1.1.3",
Name: "two",
TTL: 300,
Priority: 0,
Type: domain.RecordTypeA,
},
}
} else if req.DNSZone == "test.example.com" {
records = []*domain.Record{
{
Data: "1.1.1.1",
Name: "",
TTL: 300,
Priority: 0,
Type: domain.RecordTypeA,
},
{
Data: "test.example.com",
Name: "two",
TTL: 600,
Priority: 30,
Type: domain.RecordTypeCNAME,
},
}
}
return &domain.ListDNSZoneRecordsResponse{
Records: records,
}, nil
}
func (m *mockScalewayDomain) UpdateDNSZoneRecords(req *domain.UpdateDNSZoneRecordsRequest, opts ...scw.RequestOption) (*domain.UpdateDNSZoneRecordsResponse, error) {
return nil, nil
}
func TestScalewayProvider_NewScalewayProvider(t *testing.T) {
_ = os.Setenv(scw.ScwAccessKeyEnv, "SCWXXXXXXXXXXXXXXXXX")
_ = os.Setenv(scw.ScwSecretKeyEnv, "11111111-1111-1111-1111-111111111111")
_ = os.Setenv(scw.ScwDefaultOrganizationIDEnv, "11111111-1111-1111-1111-111111111111")
_, err := NewScalewayProvider(context.TODO(), endpoint.NewDomainFilter([]string{"example.com"}), true)
if err != nil {
t.Errorf("failed : %s", err)
}
_ = os.Unsetenv(scw.ScwDefaultOrganizationIDEnv)
_, err = NewScalewayProvider(context.TODO(), endpoint.NewDomainFilter([]string{"example.com"}), true)
if err == nil {
t.Errorf("expected to fail")
}
_ = os.Setenv(scw.ScwDefaultOrganizationIDEnv, "dummy")
_, err = NewScalewayProvider(context.TODO(), endpoint.NewDomainFilter([]string{"example.com"}), true)
if err == nil {
t.Errorf("expected to fail")
}
_ = os.Unsetenv(scw.ScwSecretKeyEnv)
_ = os.Setenv(scw.ScwDefaultOrganizationIDEnv, "11111111-1111-1111-1111-111111111111")
_, err = NewScalewayProvider(context.TODO(), endpoint.NewDomainFilter([]string{"example.com"}), true)
if err == nil {
t.Errorf("expected to fail")
}
_ = os.Setenv(scw.ScwSecretKeyEnv, "dummy")
_, err = NewScalewayProvider(context.TODO(), endpoint.NewDomainFilter([]string{"example.com"}), true)
if err == nil {
t.Errorf("expected to fail")
}
_ = os.Unsetenv(scw.ScwAccessKeyEnv)
_ = os.Setenv(scw.ScwSecretKeyEnv, "11111111-1111-1111-1111-111111111111")
_, err = NewScalewayProvider(context.TODO(), endpoint.NewDomainFilter([]string{"example.com"}), true)
if err == nil {
t.Errorf("expected to fail")
}
_ = os.Setenv(scw.ScwAccessKeyEnv, "dummy")
_, err = NewScalewayProvider(context.TODO(), endpoint.NewDomainFilter([]string{"example.com"}), true)
if err == nil {
t.Errorf("expected to fail")
}
}
func TestScalewayProvider_Zones(t *testing.T) {
mocked := mockScalewayDomain{nil}
provider := &ScalewayProvider{
domainAPI: &mocked,
domainFilter: endpoint.NewDomainFilter([]string{"example.com"}),
}
expected := []*domain.DNSZone{
{
Domain: "example.com",
Subdomain: "",
},
{
Domain: "example.com",
Subdomain: "test",
},
}
zones, err := provider.Zones(context.Background())
if err != nil {
t.Fatal(err)
}
require.Len(t, zones, len(expected))
for i, zone := range zones {
assert.Equal(t, expected[i], zone)
}
}
func TestScalewayProvider_Records(t *testing.T) {
mocked := mockScalewayDomain{nil}
provider := &ScalewayProvider{
domainAPI: &mocked,
domainFilter: endpoint.NewDomainFilter([]string{"example.com"}),
}
expected := []*endpoint.Endpoint{
{
DNSName: "one.example.com",
RecordTTL: 300,
RecordType: "A",
Targets: []string{"1.1.1.1"},
ProviderSpecific: endpoint.ProviderSpecific{
{
Name: scalewayPriorityKey,
Value: "0",
},
},
},
{
DNSName: "two.example.com",
RecordTTL: 300,
RecordType: "A",
Targets: []string{"1.1.1.2", "1.1.1.3"},
ProviderSpecific: endpoint.ProviderSpecific{
{
Name: scalewayPriorityKey,
Value: "0",
},
},
},
{
DNSName: "test.example.com",
RecordTTL: 300,
RecordType: "A",
Targets: []string{"1.1.1.1"},
ProviderSpecific: endpoint.ProviderSpecific{
{
Name: scalewayPriorityKey,
Value: "0",
},
},
},
{
DNSName: "two.test.example.com",
RecordTTL: 600,
RecordType: "CNAME",
Targets: []string{"test.example.com"},
ProviderSpecific: endpoint.ProviderSpecific{
{
Name: scalewayPriorityKey,
Value: "30",
},
},
},
}
records, err := provider.Records(context.TODO())
if err != nil {
t.Fatal(err)
}
require.Len(t, records, len(expected))
for _, record := range records {
found := false
for _, expectedRecord := range expected {
if checkRecordEquality(record, expectedRecord) {
found = true
}
}
assert.Equal(t, true, found)
}
}
// this test is really ugly since we are working on maps, so arrays are randomly sorted
// feel free to modify if you have a better idea
func TestScalewayProvider_generateApplyRequests(t *testing.T) {
mocked := mockScalewayDomain{nil}
provider := &ScalewayProvider{
domainAPI: &mocked,
domainFilter: endpoint.NewDomainFilter([]string{"example.com"}),
}
expected := []*domain.UpdateDNSZoneRecordsRequest{
{
DNSZone: "example.com",
Changes: []*domain.RecordChange{
{
Add: &domain.RecordChangeAdd{
Records: []*domain.Record{
{
Data: "1.1.1.1",
Name: "",
TTL: 300,
Type: domain.RecordTypeA,
Priority: 0,
},
{
Data: "1.1.1.2",
Name: "",
TTL: 300,
Type: domain.RecordTypeA,
Priority: 0,
},
{
Data: "2.2.2.2",
Name: "me",
TTL: 600,
Type: domain.RecordTypeA,
Priority: 30,
},
},
},
},
{
Delete: &domain.RecordChangeDelete{
Data: "3.3.3.3",
Name: "me",
Type: domain.RecordTypeA,
},
},
{
Delete: &domain.RecordChangeDelete{
Data: "1.1.1.1",
Name: "here",
Type: domain.RecordTypeA,
},
},
{
Delete: &domain.RecordChangeDelete{
Data: "1.1.1.2",
Name: "here",
Type: domain.RecordTypeA,
},
},
},
},
{
DNSZone: "test.example.com",
Changes: []*domain.RecordChange{
{
Add: &domain.RecordChangeAdd{
Records: []*domain.Record{
{
Data: "example.com",
Name: "",
TTL: 600,
Type: domain.RecordTypeCNAME,
Priority: 20,
},
{
Data: "1.2.3.4",
Name: "my",
TTL: 300,
Type: domain.RecordTypeA,
Priority: 0,
},
{
Data: "5.6.7.8",
Name: "my",
TTL: 300,
Type: domain.RecordTypeA,
Priority: 0,
},
},
},
},
{
Delete: &domain.RecordChangeDelete{
Data: "1.1.1.1",
Name: "here.is.my",
Type: domain.RecordTypeA,
},
},
{
Delete: &domain.RecordChangeDelete{
Data: "4.4.4.4",
Name: "my",
Type: domain.RecordTypeA,
},
},
{
Delete: &domain.RecordChangeDelete{
Data: "5.5.5.5",
Name: "my",
Type: domain.RecordTypeA,
},
},
},
},
}
changes := &plan.Changes{
Create: []*endpoint.Endpoint{
{
DNSName: "example.com",
RecordType: "A",
Targets: []string{"1.1.1.1", "1.1.1.2"},
},
{
DNSName: "test.example.com",
RecordType: "CNAME",
ProviderSpecific: endpoint.ProviderSpecific{
{
Name: scalewayPriorityKey,
Value: "20",
},
},
RecordTTL: 600,
Targets: []string{"example.com"},
},
},
Delete: []*endpoint.Endpoint{
{
DNSName: "here.example.com",
RecordType: "A",
Targets: []string{"1.1.1.1", "1.1.1.2"},
},
{
DNSName: "here.is.my.test.example.com",
RecordType: "A",
Targets: []string{"1.1.1.1"},
},
},
UpdateNew: []*endpoint.Endpoint{
{
DNSName: "me.example.com",
ProviderSpecific: endpoint.ProviderSpecific{
{
Name: scalewayPriorityKey,
Value: "30",
},
},
RecordType: "A",
RecordTTL: 600,
Targets: []string{"2.2.2.2"},
},
{
DNSName: "my.test.example.com",
RecordType: "A",
Targets: []string{"1.2.3.4", "5.6.7.8"},
},
},
UpdateOld: []*endpoint.Endpoint{
{
DNSName: "me.example.com",
ProviderSpecific: endpoint.ProviderSpecific{
{
Name: scalewayPriorityKey,
Value: "1234",
},
},
RecordType: "A",
Targets: []string{"3.3.3.3"},
},
{
DNSName: "my.test.example.com",
RecordType: "A",
Targets: []string{"4.4.4.4", "5.5.5.5"},
},
},
}
requests, err := provider.generateApplyRequests(context.TODO(), changes)
if err != nil {
t.Fatal(err)
}
require.Len(t, requests, len(expected))
total := int(len(expected))
for _, req := range requests {
for _, exp := range expected {
if checkScalewayReqChanges(req, exp) {
total--
}
}
}
assert.Equal(t, 0, total)
}
func checkRecordEquality(record1, record2 *endpoint.Endpoint) bool {
return record1.Targets.Same(record2.Targets) &&
record1.DNSName == record2.DNSName &&
record1.RecordTTL == record2.RecordTTL &&
record1.RecordType == record2.RecordType &&
reflect.DeepEqual(record1.ProviderSpecific, record2.ProviderSpecific)
}
func checkScalewayReqChanges(r1, r2 *domain.UpdateDNSZoneRecordsRequest) bool {
if r1.DNSZone != r2.DNSZone {
return false
}
if len(r1.Changes) != len(r2.Changes) {
return false
}
total := int(len(r1.Changes))
for _, c1 := range r1.Changes {
for _, c2 := range r2.Changes {
// we only have 1 add per request
if c1.Add != nil && c2.Add != nil && checkScalewayRecords(c1.Add.Records, c2.Add.Records) {
total--
} else if c1.Delete != nil && c2.Delete != nil {
if c1.Delete.Data == c2.Delete.Data && c1.Delete.Name == c2.Delete.Name && c1.Delete.Type == c2.Delete.Type {
total--
}
}
}
}
return total == 0
}
func checkScalewayRecords(rs1, rs2 []*domain.Record) bool {
if len(rs1) != len(rs2) {
return false
}
total := int(len(rs1))
for _, r1 := range rs1 {
for _, r2 := range rs2 {
if r1.Data == r2.Data &&
r1.Name == r2.Name &&
r1.Priority == r2.Priority &&
r1.TTL == r2.TTL &&
r1.Type == r2.Type {
total--
}
}
}
return total == 0
}

View File

@ -0,0 +1,500 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ultradns
import (
"context"
"encoding/base64"
"fmt"
"os"
"strconv"
"strings"
"time"
log "github.com/sirupsen/logrus"
udnssdk "github.com/ultradns/ultradns-sdk-go"
"sigs.k8s.io/external-dns/endpoint"
"sigs.k8s.io/external-dns/plan"
"sigs.k8s.io/external-dns/provider"
)
const (
ultradnsCreate = "CREATE"
ultradnsDelete = "DELETE"
ultradnsUpdate = "UPDATE"
sbPoolPriority = 1
sbPoolOrder = "ROUND_ROBIN"
rdPoolOrder = "ROUND_ROBIN"
)
// global variables
var sbPoolRunProbes = true
var sbPoolActOnProbes = true
var ultradnsPoolType = "rdpool"
var accountName string
// Setting custom headers for UltraDNS API calls
var customHeader = []udnssdk.CustomHeader{
{
Key: "UltraClient",
Value: "kube-client",
},
}
// UltraDNSProvider struct
type UltraDNSProvider struct {
provider.BaseProvider
client udnssdk.Client
domainFilter endpoint.DomainFilter
dryRun bool
}
// UltraDNSChanges struct
type UltraDNSChanges struct {
Action string
ResourceRecordSetUltraDNS udnssdk.RRSet
}
// NewUltraDNSProvider initializes a new UltraDNS DNS based provider
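// Required environment variables: ULTRADNS_USERNAME, ULTRADNS_PASSWORD (base64
// encoded) and ULTRADNS_BASEURL. Optional: ULTRADNS_ACCOUNTNAME,
// ULTRADNS_ENABLE_PROBING, ULTRADNS_ENABLE_ACTONPROBE (both "true" or "false")
// and ULTRADNS_POOL_TYPE ("sbpool" or "rdpool", defaulting to "rdpool").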
func NewUltraDNSProvider(domainFilter endpoint.DomainFilter, dryRun bool) (*UltraDNSProvider, error) {
username, ok := os.LookupEnv("ULTRADNS_USERNAME")
udnssdk.SetCustomHeader = customHeader
if !ok {
return nil, fmt.Errorf("no username found")
}
base64password, ok := os.LookupEnv("ULTRADNS_PASSWORD")
if !ok {
return nil, fmt.Errorf("no password found")
}
// Base64 Standard Decoding
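// e.g. the value can be produced with: echo -n '<password>' | base64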
password, err := base64.StdEncoding.DecodeString(base64password)
if err != nil {
fmt.Printf("Error decoding string: %s ", err.Error())
return nil, err
}
baseURL, ok := os.LookupEnv("ULTRADNS_BASEURL")
if !ok {
return nil, fmt.Errorf("no baseurl found")
}
accountName, ok = os.LookupEnv("ULTRADNS_ACCOUNTNAME")
if !ok {
accountName = ""
}
probeValue, ok := os.LookupEnv("ULTRADNS_ENABLE_PROBING")
if ok {
if (probeValue != "true") && (probeValue != "false") {
return nil, fmt.Errorf("please set proper probe value, the values can be either true or false")
}
sbPoolRunProbes, _ = strconv.ParseBool(probeValue)
}
actOnProbeValue, ok := os.LookupEnv("ULTRADNS_ENABLE_ACTONPROBE")
if ok {
if (actOnProbeValue != "true") && (actOnProbeValue != "false") {
return nil, fmt.Errorf("please set proper act on probe value, the values can be either true or false")
}
sbPoolActOnProbes, _ = strconv.ParseBool(actOnProbeValue)
}
poolValue, ok := os.LookupEnv("ULTRADNS_POOL_TYPE")
if ok {
if (poolValue != "sbpool") && (poolValue != "rdpool") {
return nil, fmt.Errorf(" please set proper ULTRADNS_POOL_TYPE, supported types are sbpool or rdpool")
}
ultradnsPoolType = poolValue
}
client, err := udnssdk.NewClient(username, string(password), baseURL)
if err != nil {
return nil, fmt.Errorf("connection cannot be established")
}
provider := &UltraDNSProvider{
client: *client,
domainFilter: domainFilter,
dryRun: dryRun,
}
return provider, nil
}
// Zones returns list of hosted zones
func (p *UltraDNSProvider) Zones(ctx context.Context) ([]udnssdk.Zone, error) {
zoneKey := &udnssdk.ZoneKey{}
var err error
if p.domainFilter.IsConfigured() {
zonesAppender := []udnssdk.Zone{}
for _, zone := range p.domainFilter.Filters {
zoneKey.Zone = zone
zoneKey.AccountName = accountName
zones, err := p.fetchZones(ctx, zoneKey)
if err != nil {
return nil, err
}
zonesAppender = append(zonesAppender, zones...)
}
return zonesAppender, nil
}
zoneKey.AccountName = accountName
zones, err := p.fetchZones(ctx, zoneKey)
if err != nil {
return nil, err
}
return zones, nil
}
func (p *UltraDNSProvider) Records(ctx context.Context) ([]*endpoint.Endpoint, error) {
var endpoints []*endpoint.Endpoint
zones, err := p.Zones(ctx)
if err != nil {
return nil, err
}
for _, zone := range zones {
log.Infof("zones : %v", zone)
var rrsetType string
var ownerName string
rrsetKey := udnssdk.RRSetKey{
Zone: zone.Properties.Name,
Type: rrsetType,
Name: ownerName,
}
if zone.Properties.ResourceRecordCount != 0 {
records, err := p.fetchRecords(ctx, rrsetKey)
if err != nil {
return nil, err
}
for _, r := range records {
recordTypeArray := strings.Fields(r.RRType)
if provider.SupportedRecordType(recordTypeArray[0]) {
log.Infof("owner name %s", r.OwnerName)
name := r.OwnerName
// root name is identified by the empty string and should be
// translated to zone name for the endpoint entry.
if r.OwnerName == "" {
name = zone.Properties.Name
}
endPointTTL := endpoint.NewEndpointWithTTL(name, recordTypeArray[0], endpoint.TTL(r.TTL), r.RData...)
endpoints = append(endpoints, endPointTTL)
}
}
}
}
log.Infof("endpoints %v", endpoints)
return endpoints, nil
}
func (p *UltraDNSProvider) fetchRecords(ctx context.Context, k udnssdk.RRSetKey) ([]udnssdk.RRSet, error) {
// Logic to paginate through all available results
maxerrs := 5
waittime := 5 * time.Second
rrsets := []udnssdk.RRSet{}
errcnt := 0
offset := 0
limit := 1000
for {
reqRrsets, ri, res, err := p.client.RRSets.SelectWithOffsetWithLimit(k, offset, limit)
if err != nil {
if res != nil && res.StatusCode >= 500 {
errcnt = errcnt + 1
if errcnt < maxerrs {
time.Sleep(waittime)
continue
}
}
return rrsets, err
}
rrsets = append(rrsets, reqRrsets...)
if ri.ReturnedCount+ri.Offset >= ri.TotalCount {
return rrsets, nil
}
offset = ri.ReturnedCount + ri.Offset
continue
}
}
func (p *UltraDNSProvider) fetchZones(ctx context.Context, zoneKey *udnssdk.ZoneKey) ([]udnssdk.Zone, error) {
// Logic to paginate through all available results
offset := 0
limit := 1000
maxerrs := 5
waittime := 5 * time.Second
zones := []udnssdk.Zone{}
errcnt := 0
for {
reqZones, ri, res, err := p.client.Zone.SelectWithOffsetWithLimit(zoneKey, offset, limit)
if err != nil {
if res != nil && res.StatusCode >= 500 {
errcnt = errcnt + 1
if errcnt < maxerrs {
time.Sleep(waittime)
continue
}
}
return zones, err
}
zones = append(zones, reqZones...)
if ri.ReturnedCount+ri.Offset >= ri.TotalCount {
return zones, nil
}
offset = ri.ReturnedCount + ri.Offset
continue
}
}
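// submitChanges groups the collected changes by zone and issues the matching
// create, update and delete calls. Multi-target A records are written as an SB
// pool or RD pool depending on ULTRADNS_POOL_TYPE, and a CNAME and TXT change
// sharing the same owner name is rejected so that --txt-prefix can be used instead.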
func (p *UltraDNSProvider) submitChanges(ctx context.Context, changes []*UltraDNSChanges) error {
cnameownerName := "cname"
txtownerName := "txt"
if len(changes) == 0 {
log.Infof("All records are already up to date")
return nil
}
zones, err := p.Zones(ctx)
if err != nil {
return err
}
zoneChanges := seperateChangeByZone(zones, changes)
for zoneName, changes := range zoneChanges {
for _, change := range changes {
if change.ResourceRecordSetUltraDNS.RRType == "CNAME" {
cnameownerName = change.ResourceRecordSetUltraDNS.OwnerName
} else if change.ResourceRecordSetUltraDNS.RRType == "TXT" {
txtownerName = change.ResourceRecordSetUltraDNS.OwnerName
}
if cnameownerName == txtownerName {
rrsetKey := udnssdk.RRSetKey{
Zone: zoneName,
Type: endpoint.RecordTypeCNAME,
Name: change.ResourceRecordSetUltraDNS.OwnerName,
}
err := p.getSpecificRecord(ctx, rrsetKey)
if err != nil {
return err
}
if !p.dryRun {
_, err = p.client.RRSets.Delete(rrsetKey)
if err != nil {
return err
}
}
return fmt.Errorf("the 'cname' and 'txt' record name cannot be same please recreate external-dns with - --txt-prefix=")
}
rrsetKey := udnssdk.RRSetKey{
Zone: zoneName,
Type: change.ResourceRecordSetUltraDNS.RRType,
Name: change.ResourceRecordSetUltraDNS.OwnerName,
}
record := udnssdk.RRSet{}
if (change.ResourceRecordSetUltraDNS.RRType == "A" || change.ResourceRecordSetUltraDNS.RRType == "AAAA") && (len(change.ResourceRecordSetUltraDNS.RData) >= 2) {
if ultradnsPoolType == "sbpool" && change.ResourceRecordSetUltraDNS.RRType == "A" {
sbPoolObject, _ := p.newSBPoolObjectCreation(ctx, change)
record = udnssdk.RRSet{
RRType: change.ResourceRecordSetUltraDNS.RRType,
OwnerName: change.ResourceRecordSetUltraDNS.OwnerName,
RData: change.ResourceRecordSetUltraDNS.RData,
TTL: change.ResourceRecordSetUltraDNS.TTL,
Profile: sbPoolObject.RawProfile(),
}
} else if ultradnsPoolType == "rdpool" {
rdPoolObject, _ := p.newRDPoolObjectCreation(ctx, change)
record = udnssdk.RRSet{
RRType: change.ResourceRecordSetUltraDNS.RRType,
OwnerName: change.ResourceRecordSetUltraDNS.OwnerName,
RData: change.ResourceRecordSetUltraDNS.RData,
TTL: change.ResourceRecordSetUltraDNS.TTL,
Profile: rdPoolObject.RawProfile(),
}
} else {
return fmt.Errorf("we do not support Multiple target 'aaaa' records in sb pool please contact to neustar for further details")
}
} else {
record = udnssdk.RRSet{
RRType: change.ResourceRecordSetUltraDNS.RRType,
OwnerName: change.ResourceRecordSetUltraDNS.OwnerName,
RData: change.ResourceRecordSetUltraDNS.RData,
TTL: change.ResourceRecordSetUltraDNS.TTL,
}
}
log.WithFields(log.Fields{
"record": record.OwnerName,
"type": record.RRType,
"ttl": record.TTL,
"action": change.Action,
"zone": zoneName,
"profile": record.Profile,
}).Info("Changing record.")
switch change.Action {
case ultradnsCreate:
if !p.dryRun {
_, err := p.client.RRSets.Create(rrsetKey, record)
if err != nil {
return err
}
}
case ultradnsDelete:
err := p.getSpecificRecord(ctx, rrsetKey)
if err != nil {
return err
}
if !p.dryRun {
_, err = p.client.RRSets.Delete(rrsetKey)
if err != nil {
return err
}
}
case ultradnsUpdate:
err := p.getSpecificRecord(ctx, rrsetKey)
if err != nil {
return err
}
if !p.dryRun {
_, err = p.client.RRSets.Update(rrsetKey, record)
if err != nil {
return err
}
}
}
}
}
return nil
}
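// ApplyChanges applies the given set of changes by converting them into UltraDNS changes and submitting them.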
func (p *UltraDNSProvider) ApplyChanges(ctx context.Context, changes *plan.Changes) error {
combinedChanges := make([]*UltraDNSChanges, 0, len(changes.Create)+len(changes.UpdateNew)+len(changes.Delete))
log.Infof("value of changes %v,%v,%v", changes.Create, changes.UpdateNew, changes.Delete)
combinedChanges = append(combinedChanges, newUltraDNSChanges(ultradnsCreate, changes.Create)...)
combinedChanges = append(combinedChanges, newUltraDNSChanges(ultradnsUpdate, changes.UpdateNew)...)
combinedChanges = append(combinedChanges, newUltraDNSChanges(ultradnsDelete, changes.Delete)...)
return p.submitChanges(ctx, combinedChanges)
}
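// newUltraDNSChanges converts endpoints into UltraDNSChanges for the given action,
// appending a trailing dot to each record name.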
func newUltraDNSChanges(action string, endpoints []*endpoint.Endpoint) []*UltraDNSChanges {
changes := make([]*UltraDNSChanges, 0, len(endpoints))
var ttl int
for _, e := range endpoints {
if e.RecordTTL.IsConfigured() {
ttl = int(e.RecordTTL)
}
// Adding suffix dot to the record name
recordName := fmt.Sprintf("%s.", e.DNSName)
change := &UltraDNSChanges{
Action: action,
ResourceRecordSetUltraDNS: udnssdk.RRSet{
RRType: e.RecordType,
OwnerName: recordName,
RData: e.Targets,
TTL: ttl,
},
}
changes = append(changes, change)
}
return changes
}
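// seperateChangeByZone groups the given changes by the hosted zone matching their
// owner name; changes without a matching zone are skipped.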
func seperateChangeByZone(zones []udnssdk.Zone, changes []*UltraDNSChanges) map[string][]*UltraDNSChanges {
change := make(map[string][]*UltraDNSChanges)
zoneNameID := provider.ZoneIDName{}
for _, z := range zones {
zoneNameID.Add(z.Properties.Name, z.Properties.Name)
change[z.Properties.Name] = []*UltraDNSChanges{}
}
for _, c := range changes {
zone, _ := zoneNameID.FindZone(c.ResourceRecordSetUltraDNS.OwnerName)
if zone == "" {
log.Infof("Skipping record %s because no hosted zone matching record DNS Name was detected", c.ResourceRecordSetUltraDNS.OwnerName)
continue
}
change[zone] = append(change[zone], c)
}
return change
}
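// getSpecificRecord returns an error if no RRSet exists for the given key.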
func (p *UltraDNSProvider) getSpecificRecord(ctx context.Context, rrsetKey udnssdk.RRSetKey) (err error) {
_, err = p.client.RRSets.Select(rrsetKey)
if err != nil {
return fmt.Errorf("no record was found for %v", rrsetKey)
}
return nil
}
// newSBPoolObjectCreation builds the SB pool profile for a multi-target A record change.
func (p *UltraDNSProvider) newSBPoolObjectCreation(ctx context.Context, change *UltraDNSChanges) (sbPool udnssdk.SBPoolProfile, err error) {
sbpoolRDataList := []udnssdk.SBRDataInfo{}
for range change.ResourceRecordSetUltraDNS.RData {
rrdataInfo := udnssdk.SBRDataInfo{
RunProbes: sbPoolRunProbes,
Priority: sbPoolPriority,
State: "NORMAL",
Threshold: 1,
Weight: nil,
}
sbpoolRDataList = append(sbpoolRDataList, rrdataInfo)
}
sbPoolObject := udnssdk.SBPoolProfile{
Context: udnssdk.SBPoolSchema,
Order: sbPoolOrder,
Description: change.ResourceRecordSetUltraDNS.OwnerName,
MaxActive: len(change.ResourceRecordSetUltraDNS.RData),
MaxServed: len(change.ResourceRecordSetUltraDNS.RData),
RDataInfo: sbpoolRDataList,
RunProbes: sbPoolRunProbes,
ActOnProbes: sbPoolActOnProbes,
}
return sbPoolObject, nil
}
// newRDPoolObjectCreation builds the RD pool profile for a multi-target record change.
func (p *UltraDNSProvider) newRDPoolObjectCreation(ctx context.Context, change *UltraDNSChanges) (rdPool udnssdk.RDPoolProfile, err error) {
rdPoolObject := udnssdk.RDPoolProfile{
Context: udnssdk.RDPoolSchema,
Order: rdPoolOrder,
Description: change.ResourceRecordSetUltraDNS.OwnerName,
}
return rdPoolObject, nil
}

View File

@ -0,0 +1,776 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ultradns
import (
"context"
"encoding/json"
"fmt"
"log"
"net/http"
"os"
"reflect"
_ "strings"
"testing"
"github.com/stretchr/testify/assert"
udnssdk "github.com/ultradns/ultradns-sdk-go"
"sigs.k8s.io/external-dns/endpoint"
"sigs.k8s.io/external-dns/plan"
)
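// mockUltraDNSZone mocks the UltraDNS zone service for unit tests.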
type mockUltraDNSZone struct {
client *udnssdk.Client
}
func (m *mockUltraDNSZone) SelectWithOffsetWithLimit(k *udnssdk.ZoneKey, offset int, limit int) (zones []udnssdk.Zone, ResultInfo udnssdk.ResultInfo, resp *http.Response, err error) {
zones = []udnssdk.Zone{}
zone := udnssdk.Zone{}
zoneJson := `
{
"properties": {
"name":"test-ultradns-provider.com.",
"accountName":"teamrest",
"type":"PRIMARY",
"dnssecStatus":"UNSIGNED",
"status":"ACTIVE",
"owner":"teamrest",
"resourceRecordCount":7,
"lastModifiedDateTime":""
}
}`
if err := json.Unmarshal([]byte(zoneJson), &zone); err != nil {
log.Fatal(err)
}
zones = append(zones, zone)
return zones, udnssdk.ResultInfo{}, nil, nil
}
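// mockUltraDNSRecord mocks the UltraDNS RRSets service for unit tests.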
type mockUltraDNSRecord struct {
client *udnssdk.Client
}
func (m *mockUltraDNSRecord) Create(k udnssdk.RRSetKey, rrset udnssdk.RRSet) (*http.Response, error) {
return nil, nil
}
func (m *mockUltraDNSRecord) Select(k udnssdk.RRSetKey) ([]udnssdk.RRSet, error) {
return []udnssdk.RRSet{{
OwnerName: "test-ultradns-provider.com.",
RRType: endpoint.RecordTypeA,
RData: []string{"1.1.1.1"},
TTL: 86400,
}}, nil
}
func (m *mockUltraDNSRecord) SelectWithOffset(k udnssdk.RRSetKey, offset int) ([]udnssdk.RRSet, udnssdk.ResultInfo, *http.Response, error) {
return nil, udnssdk.ResultInfo{}, nil, nil
}
func (m *mockUltraDNSRecord) Update(udnssdk.RRSetKey, udnssdk.RRSet) (*http.Response, error) {
return nil, nil
}
func (m *mockUltraDNSRecord) Delete(k udnssdk.RRSetKey) (*http.Response, error) {
return nil, nil
}
func (m *mockUltraDNSRecord) SelectWithOffsetWithLimit(k udnssdk.RRSetKey, offset int, limit int) (rrsets []udnssdk.RRSet, ResultInfo udnssdk.ResultInfo, resp *http.Response, err error) {
return []udnssdk.RRSet{{
OwnerName: "test-ultradns-provider.com.",
RRType: endpoint.RecordTypeA,
RData: []string{"1.1.1.1"},
TTL: 86400,
}}, udnssdk.ResultInfo{}, nil, nil
}
// NewUltraDNSProvider Test scenario
func TestNewUltraDNSProvider(t *testing.T) {
_ = os.Setenv("ULTRADNS_USERNAME", "")
_ = os.Setenv("ULTRADNS_PASSWORD", "")
_ = os.Setenv("ULTRADNS_BASEURL", "")
_ = os.Setenv("ULTRADNS_ACCOUNTNAME", "")
_, err := NewUltraDNSProvider(endpoint.NewDomainFilter([]string{"test-ultradns-provider.com"}), true)
assert.Nil(t, err)
_ = os.Unsetenv("ULTRADNS_PASSWORD")
_ = os.Unsetenv("ULTRADNS_USERNAME")
_ = os.Unsetenv("ULTRADNS_BASEURL")
_ = os.Unsetenv("ULTRADNS_ACCOUNTNAME")
_, err = NewUltraDNSProvider(endpoint.NewDomainFilter([]string{"test-ultradns-provider.com"}), true)
assert.NotNilf(t, err, "Expected to fail %s", "formatted")
}
//zones function test scenario
func TestUltraDNSProvider_Zones(t *testing.T) {
mocked := mockUltraDNSZone{}
provider := &UltraDNSProvider{
client: udnssdk.Client{
Zone: &mocked,
},
}
zoneKey := &udnssdk.ZoneKey{
Zone: "",
AccountName: "teamrest",
}
expected, _, _, err := provider.client.Zone.SelectWithOffsetWithLimit(zoneKey, 0, 1000)
assert.Nil(t, err)
zones, err := provider.Zones(context.Background())
assert.Nil(t, err)
assert.Equal(t, reflect.DeepEqual(expected, zones), true)
}
//Records function test case
func TestUltraDNSProvider_Records(t *testing.T) {
mocked := mockUltraDNSRecord{}
mockedDomain := mockUltraDNSZone{}
provider := &UltraDNSProvider{
client: udnssdk.Client{
RRSets: &mocked,
Zone: &mockedDomain,
},
}
rrsetKey := udnssdk.RRSetKey{}
expected, _, _, err := provider.client.RRSets.SelectWithOffsetWithLimit(rrsetKey, 0, 1000)
assert.Nil(t, err)
records, err := provider.Records(context.Background())
assert.Nil(t, err)
for _, v := range records {
assert.Equal(t, fmt.Sprintf("%s.", v.DNSName), expected[0].OwnerName)
assert.Equal(t, v.RecordType, expected[0].RRType)
assert.Equal(t, int(v.RecordTTL), expected[0].TTL)
}
}
//ApplyChanges function testcase
func TestUltraDNSProvider_ApplyChanges(t *testing.T) {
changes := &plan.Changes{}
mocked := mockUltraDNSRecord{nil}
mockedDomain := mockUltraDNSZone{nil}
provider := &UltraDNSProvider{
client: udnssdk.Client{
RRSets: &mocked,
Zone: &mockedDomain,
},
}
changes.Create = []*endpoint.Endpoint{
{DNSName: "test-ultradns-provider.com", Targets: endpoint.Targets{"1.1.1.1"}, RecordType: "A"},
{DNSName: "ttl.test-ultradns-provider.com", Targets: endpoint.Targets{"1.1.1.1"}, RecordType: "A", RecordTTL: 100},
}
changes.Create = []*endpoint.Endpoint{{DNSName: "test-ultradns-provider.com", Targets: endpoint.Targets{"1.1.1.2"}, RecordType: "A"}}
changes.UpdateNew = []*endpoint.Endpoint{{DNSName: "test-ultradns-provider.com", Targets: endpoint.Targets{"1.1.2.2"}, RecordType: "A", RecordTTL: 100}}
changes.UpdateNew = []*endpoint.Endpoint{{DNSName: "test-ultradns-provider.com", Targets: endpoint.Targets{"1.1.2.2", "1.1.2.3", "1.1.2.4"}, RecordType: "A", RecordTTL: 100}}
changes.Delete = []*endpoint.Endpoint{{DNSName: "test-ultradns-provider.com", Targets: endpoint.Targets{"1.1.2.2", "1.1.2.3", "1.1.2.4"}, RecordType: "A", RecordTTL: 100}}
changes.Delete = []*endpoint.Endpoint{{DNSName: "ttl.test-ultradns-provider.com", Targets: endpoint.Targets{"1.1.1.1"}, RecordType: "A", RecordTTL: 100}}
err := provider.ApplyChanges(context.Background(), changes)
assert.Nilf(t, err, "Should not fail %s", "formatted")
}
// Testing function getSpecificRecord
func TestUltraDNSProvider_getSpecificRecord(t *testing.T) {
mocked := mockUltraDNSRecord{nil}
mockedDomain := mockUltraDNSZone{nil}
provider := &UltraDNSProvider{
client: udnssdk.Client{
RRSets: &mocked,
Zone: &mockedDomain,
},
}
recordSetKey := udnssdk.RRSetKey{
Zone: "test-ultradns-provider.com.",
Type: "A",
Name: "teamrest",
}
err := provider.getSpecificRecord(context.Background(), recordSetKey)
assert.Nil(t, err)
}
// Failure scenario where the CNAME and TXT record names are the same
func TestUltraDNSProvider_ApplyChangesCNAME(t *testing.T) {
changes := &plan.Changes{}
mocked := mockUltraDNSRecord{nil}
mockedDomain := mockUltraDNSZone{nil}
provider := &UltraDNSProvider{
client: udnssdk.Client{
RRSets: &mocked,
Zone: &mockedDomain,
},
}
changes.Create = []*endpoint.Endpoint{
{DNSName: "test-ultradns-provider.com", Targets: endpoint.Targets{"1.1.1.1"}, RecordType: "CNAME"},
{DNSName: "test-ultradns-provider.com", Targets: endpoint.Targets{"1.1.1.1"}, RecordType: "TXT"},
}
err := provider.ApplyChanges(context.Background(), changes)
assert.NotNil(t, err)
}
// This test runs only when the "ULTRADNS_INTEGRATION" environment variable is set and the zone "kubernetes-ultradns-provider-test.com" is available.
func TestUltraDNSProvider_ApplyChanges_Integration(t *testing.T) {
_, ok := os.LookupEnv("ULTRADNS_INTEGRATION")
if !ok {
log.Printf("Skipping test")
} else {
providerUltradns, err := NewUltraDNSProvider(endpoint.NewDomainFilter([]string{"kubernetes-ultradns-provider-test.com"}), false)
changes := &plan.Changes{}
changes.Create = []*endpoint.Endpoint{
{DNSName: "kubernetes-ultradns-provider-test.com", Targets: endpoint.Targets{"1.1.1.1"}, RecordType: "A"},
{DNSName: "ttl.kubernetes-ultradns-provider-test.com", Targets: endpoint.Targets{"2001:0db8:85a3:0000:0000:8a2e:0370:7334"}, RecordType: "AAAA", RecordTTL: 100},
}
err = providerUltradns.ApplyChanges(context.Background(), changes)
assert.Nil(t, err)
rrsetKey := udnssdk.RRSetKey{
Zone: "kubernetes-ultradns-provider-test.com.",
Name: "kubernetes-ultradns-provider-test.com.",
Type: "A",
}
rrsets, _ := providerUltradns.client.RRSets.Select(rrsetKey)
assert.Equal(t, rrsets[0].RData[0], "1.1.1.1")
rrsetKey = udnssdk.RRSetKey{
Zone: "kubernetes-ultradns-provider-test.com.",
Name: "ttl.kubernetes-ultradns-provider-test.com.",
Type: "AAAA",
}
rrsets, _ = providerUltradns.client.RRSets.Select(rrsetKey)
assert.Equal(t, rrsets[0].RData[0], "2001:db8:85a3:0:0:8a2e:370:7334")
changes = &plan.Changes{}
changes.UpdateNew = []*endpoint.Endpoint{
{DNSName: "kubernetes-ultradns-provider-test.com", Targets: endpoint.Targets{"1.1.2.2"}, RecordType: "A", RecordTTL: 100},
{DNSName: "ttl.kubernetes-ultradns-provider-test.com", Targets: endpoint.Targets{"2001:0db8:85a3:0000:0000:8a2e:0370:7335"}, RecordType: "AAAA", RecordTTL: 100}}
err = providerUltradns.ApplyChanges(context.Background(), changes)
assert.Nil(t, err)
rrsetKey = udnssdk.RRSetKey{
Zone: "kubernetes-ultradns-provider-test.com.",
Name: "kubernetes-ultradns-provider-test.com.",
Type: "A",
}
rrsets, _ = providerUltradns.client.RRSets.Select(rrsetKey)
assert.Equal(t, rrsets[0].RData[0], "1.1.2.2")
rrsetKey = udnssdk.RRSetKey{
Zone: "kubernetes-ultradns-provider-test.com.",
Name: "ttl.kubernetes-ultradns-provider-test.com.",
Type: "AAAA",
}
rrsets, _ = providerUltradns.client.RRSets.Select(rrsetKey)
assert.Equal(t, rrsets[0].RData[0], "2001:db8:85a3:0:0:8a2e:370:7335")
changes = &plan.Changes{}
changes.Delete = []*endpoint.Endpoint{
{DNSName: "ttl.kubernetes-ultradns-provider-test.com", Targets: endpoint.Targets{"2001:0db8:85a3:0000:0000:8a2e:0370:7335"}, RecordType: "AAAA", RecordTTL: 100},
{DNSName: "kubernetes-ultradns-provider-test.com", Targets: endpoint.Targets{"1.1.2.2"}, RecordType: "A", RecordTTL: 100}}
err = providerUltradns.ApplyChanges(context.Background(), changes)
assert.Nil(t, err)
resp, _ := providerUltradns.client.Do("GET", "zones/kubernetes-ultradns-provider-test.com./rrsets/AAAA/ttl.kubernetes-ultradns-provider-test.com.", nil, udnssdk.RRSetListDTO{})
assert.Equal(t, resp.Status, "404 Not Found")
resp, _ = providerUltradns.client.Do("GET", "zones/kubernetes-ultradns-provider-test.com./rrsets/A/kubernetes-ultradns-provider-test.com.", nil, udnssdk.RRSetListDTO{})
assert.Equal(t, resp.Status, "404 Not Found")
}
}
// This test runs only when the "ULTRADNS_INTEGRATION" environment variable is set and the zone "kubernetes-ultradns-provider-test.com" is available; it exercises records with multiple targets.
func TestUltraDNSProvider_ApplyChanges_MultipleTarget_integeration(t *testing.T) {
_, ok := os.LookupEnv("ULTRADNS_INTEGRATION")
if !ok {
log.Printf("Skipping test")
} else {
provider, err := NewUltraDNSProvider(endpoint.NewDomainFilter([]string{"kubernetes-ultradns-provider-test.com"}), false)
changes := &plan.Changes{}
changes.Create = []*endpoint.Endpoint{
{DNSName: "kubernetes-ultradns-provider-test.com", Targets: endpoint.Targets{"1.1.1.1", "1.1.2.2"}, RecordType: "A"}}
err = provider.ApplyChanges(context.Background(), changes)
assert.Nil(t, err)
rrsetKey := udnssdk.RRSetKey{
Zone: "kubernetes-ultradns-provider-test.com.",
Name: "kubernetes-ultradns-provider-test.com.",
Type: "A",
}
rrsets, _ := provider.client.RRSets.Select(rrsetKey)
assert.Equal(t, rrsets[0].RData, []string{"1.1.1.1", "1.1.2.2"})
changes = &plan.Changes{}
changes.UpdateNew = []*endpoint.Endpoint{{DNSName: "kubernetes-ultradns-provider-test.com", Targets: endpoint.Targets{"1.1.2.2", "192.168.0.24", "1.2.3.4"}, RecordType: "A", RecordTTL: 100}}
err = provider.ApplyChanges(context.Background(), changes)
assert.Nil(t, err)
rrsetKey = udnssdk.RRSetKey{
Zone: "kubernetes-ultradns-provider-test.com.",
Name: "kubernetes-ultradns-provider-test.com.",
Type: "A",
}
rrsets, _ = provider.client.RRSets.Select(rrsetKey)
assert.Equal(t, rrsets[0].RData, []string{"1.1.2.2", "192.168.0.24", "1.2.3.4"})
changes = &plan.Changes{}
changes.UpdateNew = []*endpoint.Endpoint{{DNSName: "kubernetes-ultradns-provider-test.com", Targets: endpoint.Targets{"1.1.2.2"}, RecordType: "A", RecordTTL: 100}}
err = provider.ApplyChanges(context.Background(), changes)
assert.Nil(t, err)
rrsetKey = udnssdk.RRSetKey{
Zone: "kubernetes-ultradns-provider-test.com.",
Name: "kubernetes-ultradns-provider-test.com.",
Type: "A",
}
rrsets, _ = provider.client.RRSets.Select(rrsetKey)
assert.Equal(t, rrsets[0].RData, []string{"1.1.2.2"})
changes = &plan.Changes{}
changes.Delete = []*endpoint.Endpoint{{DNSName: "kubernetes-ultradns-provider-test.com", Targets: endpoint.Targets{"1.1.2.2", "192.168.0.24"}, RecordType: "A"}}
err = provider.ApplyChanges(context.Background(), changes)
assert.Nil(t, err)
resp, _ := provider.client.Do("GET", "zones/kubernetes-ultradns-provider-test.com./rrsets/A/kubernetes-ultradns-provider-test.com.", nil, udnssdk.RRSetListDTO{})
assert.Equal(t, resp.Status, "404 Not Found")
}
}
// Test case to check sbpool creation
func TestUltraDNSProvider_newSBPoolObjectCreation(t *testing.T) {
mocked := mockUltraDNSRecord{nil}
mockedDomain := mockUltraDNSZone{nil}
provider := &UltraDNSProvider{
client: udnssdk.Client{
RRSets: &mocked,
Zone: &mockedDomain,
},
}
sbpoolRDataList := []udnssdk.SBRDataInfo{}
changes := &plan.Changes{}
changes.UpdateNew = []*endpoint.Endpoint{{DNSName: "kubernetes-ultradns-provider-test.com.", Targets: endpoint.Targets{"1.1.2.2", "192.168.0.24"}, RecordType: "A", RecordTTL: 100}}
changesList := &UltraDNSChanges{
Action: "UPDATE",
ResourceRecordSetUltraDNS: udnssdk.RRSet{
RRType: "A",
OwnerName: "kubernetes-ultradns-provider-test.com.",
RData: []string{"1.1.2.2", "192.168.0.24"},
TTL: 100,
},
}
for range changesList.ResourceRecordSetUltraDNS.RData {
rrdataInfo := udnssdk.SBRDataInfo{
RunProbes: true,
Priority: 1,
State: "NORMAL",
Threshold: 1,
Weight: nil,
}
sbpoolRDataList = append(sbpoolRDataList, rrdataInfo)
}
sbPoolObject := udnssdk.SBPoolProfile{
Context: udnssdk.SBPoolSchema,
Order: "ROUND_ROBIN",
Description: "kubernetes-ultradns-provider-test.com.",
MaxActive: 2,
MaxServed: 2,
RDataInfo: sbpoolRDataList,
RunProbes: true,
ActOnProbes: true,
}
actualSBPoolObject, _ := provider.newSBPoolObjectCreation(context.Background(), changesList)
assert.Equal(t, sbPoolObject, actualSBPoolObject)
}
// Test case to check the failure scenario for multiple AAAA targets when the pool type is sbpool
func TestUltraDNSProvider_MultipleTargetAAAA(t *testing.T) {
_, ok := os.LookupEnv("ULTRADNS_INTEGRATION")
if !ok {
log.Printf("Skipping test")
} else {
_ = os.Setenv("ULTRADNS_POOL_TYPE", "sbpool")
provider, _ := NewUltraDNSProvider(endpoint.NewDomainFilter([]string{"kubernetes-ultradns-provider-test.com"}), false)
changes := &plan.Changes{}
changes.Create = []*endpoint.Endpoint{
{DNSName: "ttl.kubernetes-ultradns-provider-test.com", Targets: endpoint.Targets{"2001:0db8:85a3:0000:0000:8a2e:0370:7334", "2001:0db8:85a3:0000:0000:8a2e:0370:7335"}, RecordType: "AAAA", RecordTTL: 100},
}
err := provider.ApplyChanges(context.Background(), changes)
assert.NotNilf(t, err, "We wanted it to fail since multiple AAAA targets are not allowed %s", "formatted")
resp, _ := provider.client.Do("GET", "zones/kubernetes-ultradns-provider-test.com./rrsets/AAAA/ttl.kubernetes-ultradns-provider-test.com.", nil, udnssdk.RRSetListDTO{})
assert.Equal(t, resp.Status, "404 Not Found")
_ = os.Unsetenv("ULTRADNS_POOL_TYPE")
}
}
// Test case to check that multiple AAAA targets succeed when the pool type is rdpool
func TestUltraDNSProvider_MultipleTargetAAAARDPool(t *testing.T) {
_, ok := os.LookupEnv("ULTRADNS_INTEGRATION")
if !ok {
log.Printf("Skipping test")
} else {
_ = os.Setenv("ULTRADNS_POOL_TYPE", "rdpool")
provider, _ := NewUltraDNSProvider(endpoint.NewDomainFilter([]string{"kubernetes-ultradns-provider-test.com"}), false)
changes := &plan.Changes{}
changes.Create = []*endpoint.Endpoint{
{DNSName: "ttl.kubernetes-ultradns-provider-test.com", Targets: endpoint.Targets{"2001:0db8:85a3:0000:0000:8a2e:0370:7334", "2001:0db8:85a3:0000:0000:8a2e:0370:7335"}, RecordType: "AAAA", RecordTTL: 100},
}
err := provider.ApplyChanges(context.Background(), changes)
assert.Nilf(t, err, " multiple AAAA targets are allowed when pool is RDPool %s", "formatted")
resp, _ := provider.client.Do("GET", "zones/kubernetes-ultradns-provider-test.com./rrsets/AAAA/ttl.kubernetes-ultradns-provider-test.com.", nil, udnssdk.RRSetListDTO{})
assert.Equal(t, resp.Status, "200 OK")
changes = &plan.Changes{}
changes.Delete = []*endpoint.Endpoint{{DNSName: "ttl.kubernetes-ultradns-provider-test.com", Targets: endpoint.Targets{"2001:0db8:85a3:0000:0000:8a2e:0370:7334", "2001:0db8:85a3:0000:0000:8a2e:0370:7335"}, RecordType: "AAAA"}}
err = provider.ApplyChanges(context.Background(), changes)
assert.Nil(t, err)
resp, _ = provider.client.Do("GET", "zones/kubernetes-ultradns-provider-test.com./rrsets/A/kubernetes-ultradns-provider-test.com.", nil, udnssdk.RRSetListDTO{})
assert.Equal(t, resp.Status, "404 Not Found")
}
}
// Test case to check multiple CNAME targets.
func TestUltraDNSProvider_MultipleTargetCNAME(t *testing.T) {
_, ok := os.LookupEnv("ULTRADNS_INTEGRATION")
if !ok {
log.Printf("Skipping test")
} else {
provider, err := NewUltraDNSProvider(endpoint.NewDomainFilter([]string{"kubernetes-ultradns-provider-test.com"}), false)
changes := &plan.Changes{}
changes.Create = []*endpoint.Endpoint{
{DNSName: "ttl.kubernetes-ultradns-provider-test.com", Targets: endpoint.Targets{"nginx.loadbalancer.com.", "nginx1.loadbalancer.com."}, RecordType: "CNAME", RecordTTL: 100},
}
err = provider.ApplyChanges(context.Background(), changes)
assert.NotNilf(t, err, "We wanted it to fail since multiple CNAME targets are not allowed %s", "formatted")
resp, _ := provider.client.Do("GET", "zones/kubernetes-ultradns-provider-test.com./rrsets/CNAME/kubernetes-ultradns-provider-test.com.", nil, udnssdk.RRSetListDTO{})
assert.Equal(t, resp.Status, "404 Not Found")
}
}
//Testing creation of RD Pool
func TestUltraDNSProvider_newRDPoolObjectCreation(t *testing.T) {
mocked := mockUltraDNSRecord{nil}
mockedDomain := mockUltraDNSZone{nil}
provider := &UltraDNSProvider{
client: udnssdk.Client{
RRSets: &mocked,
Zone: &mockedDomain,
},
}
changes := &plan.Changes{}
changes.UpdateNew = []*endpoint.Endpoint{{DNSName: "kubernetes-ultradns-provider-test.com.", Targets: endpoint.Targets{"1.1.2.2", "192.168.0.24"}, RecordType: "A", RecordTTL: 100}}
changesList := &UltraDNSChanges{
Action: "UPDATE",
ResourceRecordSetUltraDNS: udnssdk.RRSet{
RRType: "A",
OwnerName: "kubernetes-ultradns-provider-test.com.",
RData: []string{"1.1.2.2", "192.168.0.24"},
TTL: 100,
},
}
rdPoolObject := udnssdk.RDPoolProfile{
Context: udnssdk.RDPoolSchema,
Order: "ROUND_ROBIN",
Description: "kubernetes-ultradns-provider-test.com.",
}
actualRDPoolObject, _ := provider.newRDPoolObjectCreation(context.Background(), changesList)
assert.Equal(t, rdPoolObject, actualRDPoolObject)
}
// Testing failure scenarios for NewUltraDNSProvider
func TestNewUltraDNSProvider_FailCases(t *testing.T) {
_ = os.Setenv("ULTRADNS_USERNAME", "")
_ = os.Setenv("ULTRADNS_PASSWORD", "")
_ = os.Setenv("ULTRADNS_BASEURL", "")
_ = os.Setenv("ULTRADNS_ACCOUNTNAME", "")
_ = os.Setenv("ULTRADNS_POOL_TYPE", "xyz")
_, err := NewUltraDNSProvider(endpoint.NewDomainFilter([]string{"test-ultradns-provider.com"}), true)
assert.NotNilf(t, err, "Pool Type other than given type not working %s", "formatted")
_ = os.Setenv("ULTRADNS_USERNAME", "")
_ = os.Setenv("ULTRADNS_PASSWORD", "")
_ = os.Setenv("ULTRADNS_BASEURL", "")
_ = os.Setenv("ULTRADNS_ACCOUNTNAME", "")
_ = os.Setenv("ULTRADNS_ENABLE_PROBING", "adefg")
_, err = NewUltraDNSProvider(endpoint.NewDomainFilter([]string{"test-ultradns-provider.com"}), true)
assert.NotNilf(t, err, "Probe value other than given values not working %s", "formatted")
_ = os.Setenv("ULTRADNS_USERNAME", "")
_ = os.Setenv("ULTRADNS_PASSWORD", "")
_ = os.Setenv("ULTRADNS_BASEURL", "")
_ = os.Setenv("ULTRADNS_ACCOUNTNAME", "")
_ = os.Setenv("ULTRADNS_ENABLE_ACTONPROBE", "adefg")
_, err = NewUltraDNSProvider(endpoint.NewDomainFilter([]string{"test-ultradns-provider.com"}), true)
assert.NotNilf(t, err, "ActOnProbe value other than given values not working %s", "formatted")
_ = os.Setenv("ULTRADNS_USERNAME", "")
_ = os.Setenv("ULTRADNS_BASEURL", "")
_ = os.Unsetenv("ULTRADNS_PASSWORD")
_ = os.Setenv("ULTRADNS_ACCOUNTNAME", "")
_, err = NewUltraDNSProvider(endpoint.NewDomainFilter([]string{"test-ultradns-provider.com"}), true)
assert.NotNilf(t, err, "Expected to give error if password is not set %s", "formatted")
_ = os.Setenv("ULTRADNS_USERNAME", "")
_ = os.Setenv("ULTRADNS_PASSWORD", "")
_ = os.Unsetenv("ULTRADNS_BASEURL")
_ = os.Setenv("ULTRADNS_ACCOUNTNAME", "")
_, err = NewUltraDNSProvider(endpoint.NewDomainFilter([]string{"test-ultradns-provider.com"}), true)
assert.NotNilf(t, err, "Expected to give error if baseurl is not set %s", "formatted")
_ = os.Setenv("ULTRADNS_USERNAME", "")
_ = os.Setenv("ULTRADNS_BASEURL", "")
_ = os.Setenv("ULTRADNS_PASSWORD", "")
_ = os.Unsetenv("ULTRADNS_ACCOUNTNAME")
_ = os.Unsetenv("ULTRADNS_ENABLE_ACTONPROBE")
_ = os.Unsetenv("ULTRADNS_ENABLE_PROBING")
_ = os.Unsetenv("ULTRADNS_POOL_TYPE")
_, accounterr := NewUltraDNSProvider(endpoint.NewDomainFilter([]string{"test-ultradns-provider.com"}), true)
assert.Nil(t, accounterr)
}
// Testing success scenarios for newly introduced environment variables
func TestNewUltraDNSProvider_NewEnvVariableSuccessCases(t *testing.T) {
_ = os.Setenv("ULTRADNS_USERNAME", "")
_ = os.Setenv("ULTRADNS_PASSWORD", "")
_ = os.Setenv("ULTRADNS_BASEURL", "")
_ = os.Setenv("ULTRADNS_ACCOUNTNAME", "")
_ = os.Setenv("ULTRADNS_POOL_TYPE", "rdpool")
_, err := NewUltraDNSProvider(endpoint.NewDomainFilter([]string{"test-ultradns-provider.com"}), true)
assert.Nilf(t, err, "Pool Type not working in proper scenario %s", "formatted")
_ = os.Setenv("ULTRADNS_USERNAME", "")
_ = os.Setenv("ULTRADNS_PASSWORD", "")
_ = os.Setenv("ULTRADNS_BASEURL", "")
_ = os.Setenv("ULTRADNS_ACCOUNTNAME", "")
_ = os.Setenv("ULTRADNS_ENABLE_PROBING", "false")
_, err1 := NewUltraDNSProvider(endpoint.NewDomainFilter([]string{"test-ultradns-provider.com"}), true)
assert.Nilf(t, err1, "Probe given value is not working %s", "formatted")
_ = os.Setenv("ULTRADNS_USERNAME", "")
_ = os.Setenv("ULTRADNS_PASSWORD", "")
_ = os.Setenv("ULTRADNS_BASEURL", "")
_ = os.Setenv("ULTRADNS_ACCOUNTNAME", "")
_ = os.Setenv("ULTRADNS_ENABLE_ACTONPROBE", "true")
_, err2 := NewUltraDNSProvider(endpoint.NewDomainFilter([]string{"test-ultradns-provider.com"}), true)
assert.Nilf(t, err2, "ActOnProbe given value is not working %s", "formatted")
}
// Scenario where base64 decoding of the ULTRADNS_PASSWORD value fails
func TestNewUltraDNSProvider_Base64DecodeFailcase(t *testing.T) {
_ = os.Setenv("ULTRADNS_USERNAME", "")
_ = os.Setenv("ULTRADNS_PASSWORD", "12345")
_ = os.Setenv("ULTRADNS_BASEURL", "")
_ = os.Setenv("ULTRADNS_ACCOUNTNAME", "")
_ = os.Setenv("ULTRADNS_ENABLE_ACTONPROBE", "true")
_, err := NewUltraDNSProvider(endpoint.NewDomainFilter([]string{"test-ultradns-provider.com"}), true)
assert.NotNilf(t, err, "Base64 decode should fail in this case %s", "formatted")
}
func TestUltraDNSProvider_PoolConversionCase(t *testing.T) {
_, ok := os.LookupEnv("ULTRADNS_INTEGRATION")
if !ok {
log.Printf("Skipping test")
} else {
//Creating SBPool Record
_ = os.Setenv("ULTRADNS_POOL_TYPE", "sbpool")
provider, _ := NewUltraDNSProvider(endpoint.NewDomainFilter([]string{"kubernetes-ultradns-provider-test.com"}), false)
changes := &plan.Changes{}
changes.Create = []*endpoint.Endpoint{{DNSName: "ttl.kubernetes-ultradns-provider-test.com", Targets: endpoint.Targets{"1.1.1.1", "1.2.3.4"}, RecordType: "A", RecordTTL: 100}}
err := provider.ApplyChanges(context.Background(), changes)
assert.Nilf(t, err, " multiple A record creation with SBPool %s", "formatted")
resp, _ := provider.client.Do("GET", "zones/kubernetes-ultradns-provider-test.com./rrsets/A/ttl.kubernetes-ultradns-provider-test.com.", nil, udnssdk.RRSetListDTO{})
assert.Equal(t, resp.Status, "200 OK")
// Converting to an RD pool
_ = os.Setenv("ULTRADNS_POOL_TYPE", "rdpool")
provider, _ = NewUltraDNSProvider(endpoint.NewDomainFilter([]string{"kubernetes-ultradns-provider-test.com"}), false)
changes = &plan.Changes{}
changes.UpdateNew = []*endpoint.Endpoint{{DNSName: "ttl.kubernetes-ultradns-provider-test.com", Targets: endpoint.Targets{"1.1.1.1", "1.2.3.5"}, RecordType: "A"}}
err = provider.ApplyChanges(context.Background(), changes)
assert.Nil(t, err)
resp, _ = provider.client.Do("GET", "zones/kubernetes-ultradns-provider-test.com./rrsets/A/ttl.kubernetes-ultradns-provider-test.com.", nil, udnssdk.RRSetListDTO{})
assert.Equal(t, resp.Status, "200 OK")
// Converting back to an SB pool
_ = os.Setenv("ULTRADNS_POOL_TYPE", "sbpool")
provider, _ = NewUltraDNSProvider(endpoint.NewDomainFilter([]string{"kubernetes-ultradns-provider-test.com"}), false)
changes = &plan.Changes{}
changes.UpdateNew = []*endpoint.Endpoint{{DNSName: "ttl.kubernetes-ultradns-provider-test.com", Targets: endpoint.Targets{"1.1.1.1", "1.2.3.4"}, RecordType: "A"}}
err = provider.ApplyChanges(context.Background(), changes)
assert.Nil(t, err)
resp, _ = provider.client.Do("GET", "zones/kubernetes-ultradns-provider-test.com./rrsets/A/ttl.kubernetes-ultradns-provider-test.com.", nil, udnssdk.RRSetListDTO{})
assert.Equal(t, resp.Status, "200 OK")
//Deleting Record
changes = &plan.Changes{}
changes.Delete = []*endpoint.Endpoint{{DNSName: "ttl.kubernetes-ultradns-provider-test.com", Targets: endpoint.Targets{"1.1.1.1", "1.2.3.4"}, RecordType: "A"}}
err = provider.ApplyChanges(context.Background(), changes)
assert.Nil(t, err)
resp, _ = provider.client.Do("GET", "zones/kubernetes-ultradns-provider-test.com./rrsets/A/kubernetes-ultradns-provider-test.com.", nil, udnssdk.RRSetListDTO{})
assert.Equal(t, resp.Status, "404 Not Found")
}
}
func TestUltraDNSProvider_DomainFilter(t *testing.T) {
_, ok := os.LookupEnv("ULTRADNS_INTEGRATION")
if !ok {
log.Printf("Skipping test")
} else {
provider, _ := NewUltraDNSProvider(endpoint.NewDomainFilter([]string{"kubernetes-ultradns-provider-test.com", "kubernetes-ultradns-provider-test.com"}), true)
zones, err := provider.Zones(context.Background())
assert.Equal(t, zones[0].Properties.Name, "kubernetes-ultradns-provider-test.com.")
assert.Equal(t, zones[1].Properties.Name, "kubernetes-ultradns-provider-test.com.")
assert.Nilf(t, err, " Multiple domain filter failed %s", "formatted")
provider, _ = NewUltraDNSProvider(endpoint.NewDomainFilter([]string{}), true)
zones, err = provider.Zones(context.Background())
assert.Nilf(t, err, " Multiple domain filter failed %s", "formatted")
}
}
func TestUltraDNSProvider_DomainFiltersZonesFailCase(t *testing.T) {
_, ok := os.LookupEnv("ULTRADNS_INTEGRATION")
if !ok {
log.Printf("Skipping test")
} else {
provider, _ := NewUltraDNSProvider(endpoint.NewDomainFilter([]string{"kubernetes-ultradns-provider-test.com", "kubernetes-uldsvdsvadvvdsvadvstradns-provider-test.com"}), true)
_, err := provider.Zones(context.Background())
assert.NotNilf(t, err, " Multiple domain filter failed %s", "formatted")
}
}
//zones function with domain filter test scenario
func TestUltraDNSProvider_DomainFilterZonesMocked(t *testing.T) {
mocked := mockUltraDNSZone{}
provider := &UltraDNSProvider{
client: udnssdk.Client{
Zone: &mocked,
},
domainFilter: endpoint.NewDomainFilter([]string{"test-ultradns-provider.com."}),
}
zoneKey := &udnssdk.ZoneKey{
Zone: "test-ultradns-provider.com.",
AccountName: "",
}
// When AccountName is not given
expected, _, _, err := provider.client.Zone.SelectWithOffsetWithLimit(zoneKey, 0, 1000)
assert.Nil(t, err)
zones, err := provider.Zones(context.Background())
assert.Nil(t, err)
assert.Equal(t, reflect.DeepEqual(expected, zones), true)
accountName = "teamrest"
// When AccountName is set
provider = &UltraDNSProvider{
client: udnssdk.Client{
Zone: &mocked,
},
domainFilter: endpoint.NewDomainFilter([]string{"test-ultradns-provider.com."}),
}
zoneKey = &udnssdk.ZoneKey{
Zone: "test-ultradns-provider.com.",
AccountName: "teamrest",
}
expected, _, _, err = provider.client.Zone.SelectWithOffsetWithLimit(zoneKey, 0, 1000)
assert.Nil(t, err)
zones, err = provider.Zones(context.Background())
assert.Nil(t, err)
assert.Equal(t, reflect.DeepEqual(expected, zones), true)
//When zone is not given but account is provided
provider = &UltraDNSProvider{
client: udnssdk.Client{
Zone: &mocked,
},
}
zoneKey = &udnssdk.ZoneKey{
AccountName: "teamrest",
}
expected, _, _, err = provider.client.Zone.SelectWithOffsetWithLimit(zoneKey, 0, 1000)
assert.Nil(t, err)
zones, err = provider.Zones(context.Background())
assert.Nil(t, err)
assert.Equal(t, reflect.DeepEqual(expected, zones), true)
}

View File

@ -25,6 +25,7 @@ import (
log "github.com/sirupsen/logrus"
"github.com/vultr/govultr"
"sigs.k8s.io/external-dns/endpoint"
"sigs.k8s.io/external-dns/plan"
"sigs.k8s.io/external-dns/provider"

View File

@ -251,10 +251,15 @@ func (pr affixNameMapper) toEndpointName(txtDNSName string) string {
func (pr affixNameMapper) toTXTName(endpointDNSName string) string {
DNSName := strings.SplitN(endpointDNSName, ".", 2)
// If specified, replace a leading asterisk in the generated txt record name with some other string
if pr.wildcardReplacement != "" && DNSName[0] == "*" {
DNSName[0] = pr.wildcardReplacement
}
if len(DNSName) < 2 {
return pr.prefix + DNSName[0] + pr.suffix
}
return pr.prefix + DNSName[0] + pr.suffix + "." + DNSName[1]
}

View File

@ -410,6 +410,7 @@ func testTXTRegistryApplyChangesWithPrefix(t *testing.T) {
Create: []*endpoint.Endpoint{
newEndpointWithOwnerResource("new-record-1.test-zone.example.org", "new-loadbalancer-1.lb.com", "", "", "ingress/default/my-ingress"),
newEndpointWithOwnerResource("multiple.test-zone.example.org", "lb3.loadbalancer.com", "", "", "ingress/default/my-ingress").WithSetIdentifier("test-set-3"),
newEndpointWithOwnerResource("example", "new-loadbalancer-1.lb.com", "", "", "ingress/default/my-ingress"),
},
Delete: []*endpoint.Endpoint{
newEndpointWithOwner("foobar.test-zone.example.org", "foobar.loadbalancer.com", endpoint.RecordTypeCNAME, "owner"),
@ -430,6 +431,8 @@ func testTXTRegistryApplyChangesWithPrefix(t *testing.T) {
newEndpointWithOwner("txt.new-record-1.test-zone.example.org", "\"heritage=external-dns,external-dns/owner=owner,external-dns/resource=ingress/default/my-ingress\"", endpoint.RecordTypeTXT, ""),
newEndpointWithOwnerResource("multiple.test-zone.example.org", "lb3.loadbalancer.com", "", "owner", "ingress/default/my-ingress").WithSetIdentifier("test-set-3"),
newEndpointWithOwner("txt.multiple.test-zone.example.org", "\"heritage=external-dns,external-dns/owner=owner,external-dns/resource=ingress/default/my-ingress\"", endpoint.RecordTypeTXT, "").WithSetIdentifier("test-set-3"),
newEndpointWithOwnerResource("example", "new-loadbalancer-1.lb.com", "", "owner", "ingress/default/my-ingress"),
newEndpointWithOwner("txt.example", "\"heritage=external-dns,external-dns/owner=owner,external-dns/resource=ingress/default/my-ingress\"", endpoint.RecordTypeTXT, ""),
},
Delete: []*endpoint.Endpoint{
newEndpointWithOwner("foobar.test-zone.example.org", "foobar.loadbalancer.com", endpoint.RecordTypeCNAME, "owner"),
@ -501,6 +504,7 @@ func testTXTRegistryApplyChangesWithSuffix(t *testing.T) {
Create: []*endpoint.Endpoint{
newEndpointWithOwnerResource("new-record-1.test-zone.example.org", "new-loadbalancer-1.lb.com", "", "", "ingress/default/my-ingress"),
newEndpointWithOwnerResource("multiple.test-zone.example.org", "lb3.loadbalancer.com", "", "", "ingress/default/my-ingress").WithSetIdentifier("test-set-3"),
newEndpointWithOwnerResource("example", "new-loadbalancer-1.lb.com", "", "", "ingress/default/my-ingress"),
newEndpointWithOwnerResource("*.wildcard.test-zone.example.org", "new-loadbalancer-1.lb.com", "", "", "ingress/default/my-ingress"),
},
Delete: []*endpoint.Endpoint{
@ -522,6 +526,8 @@ func testTXTRegistryApplyChangesWithSuffix(t *testing.T) {
newEndpointWithOwner("new-record-1-txt.test-zone.example.org", "\"heritage=external-dns,external-dns/owner=owner,external-dns/resource=ingress/default/my-ingress\"", endpoint.RecordTypeTXT, ""),
newEndpointWithOwnerResource("multiple.test-zone.example.org", "lb3.loadbalancer.com", "", "owner", "ingress/default/my-ingress").WithSetIdentifier("test-set-3"),
newEndpointWithOwner("multiple-txt.test-zone.example.org", "\"heritage=external-dns,external-dns/owner=owner,external-dns/resource=ingress/default/my-ingress\"", endpoint.RecordTypeTXT, "").WithSetIdentifier("test-set-3"),
newEndpointWithOwnerResource("example", "new-loadbalancer-1.lb.com", "", "owner", "ingress/default/my-ingress"),
newEndpointWithOwner("example-txt", "\"heritage=external-dns,external-dns/owner=owner,external-dns/resource=ingress/default/my-ingress\"", endpoint.RecordTypeTXT, ""),
newEndpointWithOwnerResource("*.wildcard.test-zone.example.org", "new-loadbalancer-1.lb.com", "", "owner", "ingress/default/my-ingress"),
newEndpointWithOwner("wildcard-txt.wildcard.test-zone.example.org", "\"heritage=external-dns,external-dns/owner=owner,external-dns/resource=ingress/default/my-ingress\"", endpoint.RecordTypeTXT, ""),
},
@ -590,6 +596,7 @@ func testTXTRegistryApplyChangesNoPrefix(t *testing.T) {
changes := &plan.Changes{
Create: []*endpoint.Endpoint{
newEndpointWithOwner("new-record-1.test-zone.example.org", "new-loadbalancer-1.lb.com", endpoint.RecordTypeCNAME, ""),
newEndpointWithOwner("example", "new-loadbalancer-1.lb.com", endpoint.RecordTypeCNAME, ""),
},
Delete: []*endpoint.Endpoint{
newEndpointWithOwner("foobar.test-zone.example.org", "foobar.loadbalancer.com", endpoint.RecordTypeCNAME, "owner"),
@ -605,6 +612,8 @@ func testTXTRegistryApplyChangesNoPrefix(t *testing.T) {
Create: []*endpoint.Endpoint{
newEndpointWithOwner("new-record-1.test-zone.example.org", "new-loadbalancer-1.lb.com", endpoint.RecordTypeCNAME, "owner"),
newEndpointWithOwner("new-record-1.test-zone.example.org", "\"heritage=external-dns,external-dns/owner=owner\"", endpoint.RecordTypeTXT, ""),
newEndpointWithOwner("example", "new-loadbalancer-1.lb.com", endpoint.RecordTypeCNAME, "owner"),
newEndpointWithOwner("example", "\"heritage=external-dns,external-dns/owner=owner\"", endpoint.RecordTypeTXT, ""),
},
Delete: []*endpoint.Endpoint{
newEndpointWithOwner("foobar.test-zone.example.org", "foobar.loadbalancer.com", endpoint.RecordTypeCNAME, "owner"),

View File

@ -28,6 +28,8 @@ import (
log "github.com/sirupsen/logrus"
networkingv1alpha3 "istio.io/client-go/pkg/apis/networking/v1alpha3"
istioclient "istio.io/client-go/pkg/clientset/versioned"
istioinformers "istio.io/client-go/pkg/informers/externalversions"
networkingv1alpha3informer "istio.io/client-go/pkg/informers/externalversions/networking/v1alpha3"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/wait"
@ -51,6 +53,7 @@ type gatewaySource struct {
combineFQDNAnnotation bool
ignoreHostnameAnnotation bool
serviceInformer coreinformers.ServiceInformer
gatewayInformer networkingv1alpha3informer.GatewayInformer
}
// NewIstioGatewaySource creates a new gatewaySource with the given config.
@ -81,6 +84,8 @@ func NewIstioGatewaySource(
// Set resync period to 0, to prevent processing when nothing has changed
informerFactory := kubeinformers.NewSharedInformerFactoryWithOptions(kubeClient, 0, kubeinformers.WithNamespace(namespace))
serviceInformer := informerFactory.Core().V1().Services()
istioInformerFactory := istioinformers.NewSharedInformerFactory(istioClient, 0)
gatewayInformer := istioInformerFactory.Networking().V1alpha3().Gateways()
// Add default resource event handlers to properly initialize informer.
serviceInformer.Informer().AddEventHandler(
@ -91,8 +96,17 @@ func NewIstioGatewaySource(
},
)
gatewayInformer.Informer().AddEventHandler(
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
log.Debug("gateway added")
},
},
)
// TODO informer is not explicitly stopped since controller is not passing in its channel.
informerFactory.Start(wait.NeverStop)
istioInformerFactory.Start(wait.NeverStop)
// wait for the local cache to be populated.
err = poll(time.Second, 60*time.Second, func() (bool, error) {
@ -102,6 +116,14 @@ func NewIstioGatewaySource(
return nil, fmt.Errorf("failed to sync cache: %v", err)
}
// wait for the local cache to be populated.
err = poll(time.Second, 60*time.Second, func() (bool, error) {
return gatewayInformer.Informer().HasSynced(), nil
})
if err != nil {
return nil, fmt.Errorf("failed to sync cache: %v", err)
}
return &gatewaySource{
kubeClient: kubeClient,
istioClient: istioClient,
@ -111,6 +133,7 @@ func NewIstioGatewaySource(
combineFQDNAnnotation: combineFQDNAnnotation,
ignoreHostnameAnnotation: ignoreHostnameAnnotation,
serviceInformer: serviceInformer,
gatewayInformer: gatewayInformer,
}, nil
}
@ -180,9 +203,23 @@ func (sc *gatewaySource) Endpoints(ctx context.Context) ([]*endpoint.Endpoint, e
return endpoints, nil
}
// TODO(tariq1890): Implement this once we have evaluated and tested GatewayInformers
// AddEventHandler adds an event handler that should be triggered if the watched Istio Gateway changes.
func (sc *gatewaySource) AddEventHandler(ctx context.Context, handler func()) {
log.Debug("Adding event handler for Istio Gateway")
sc.gatewayInformer.Informer().AddEventHandler(
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
handler()
},
UpdateFunc: func(old interface{}, new interface{}) {
handler()
},
DeleteFunc: func(obj interface{}) {
handler()
},
},
)
}
// filterByAnnotations filters a list of configs by a given annotation selector.
@ -272,11 +309,6 @@ func (sc *gatewaySource) endpointsFromGateway(hostnames []string, gateway networ
providerSpecific, setIdentifier := getProviderSpecificAnnotations(annotations)
// Skip endpoints if we do not want entries from annotations
if !sc.ignoreHostnameAnnotation {
hostnames = append(hostnames, getHostnamesFromAnnotations(annotations)...)
}
for _, host := range hostnames {
endpoints = append(endpoints, endpointsForHostname(host, targets, ttl, providerSpecific, setIdentifier)...)
}
@ -300,9 +332,16 @@ func (sc *gatewaySource) hostNamesFromGateway(gateway networkingv1alpha3.Gateway
host = parts[1]
}
hostnames = append(hostnames, host)
if host != "*" {
hostnames = append(hostnames, host)
}
}
}
if !sc.ignoreHostnameAnnotation {
hostnames = append(hostnames, getHostnamesFromAnnotations(gateway.Annotations)...)
}
return hostnames, nil
}

View File

@ -1072,6 +1072,65 @@ func testGatewayEndpoints(t *testing.T) {
},
ignoreHostnameAnnotation: true,
},
{
title: "gateways with wildcard host",
targetNamespace: "",
lbServices: []fakeIngressGatewayService{
{
ips: []string{"1.2.3.4"},
},
},
configItems: []fakeGatewayConfig{
{
name: "fake1",
namespace: "",
dnsnames: [][]string{{"*"}},
},
{
name: "fake2",
namespace: "",
dnsnames: [][]string{{"some-namespace/*"}},
},
},
expected: []*endpoint.Endpoint{},
},
{
title: "gateways with wildcard host and hostname annotation",
targetNamespace: "",
lbServices: []fakeIngressGatewayService{
{
ips: []string{"1.2.3.4"},
},
},
configItems: []fakeGatewayConfig{
{
name: "fake1",
namespace: "",
annotations: map[string]string{
hostnameAnnotationKey: "fake1.dns-through-hostname.com",
},
dnsnames: [][]string{{"*"}},
},
{
name: "fake2",
namespace: "",
annotations: map[string]string{
hostnameAnnotationKey: "fake2.dns-through-hostname.com",
},
dnsnames: [][]string{{"some-namespace/*"}},
},
},
expected: []*endpoint.Endpoint{
{
DNSName: "fake1.dns-through-hostname.com",
Targets: endpoint.Targets{"1.2.3.4"},
},
{
DNSName: "fake2.dns-through-hostname.com",
Targets: endpoint.Targets{"1.2.3.4"},
},
},
},
} {
t.Run(ti.title, func(t *testing.T) {

View File

@ -1,3 +1,19 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package source
import (

View File

@ -34,6 +34,7 @@ import (
"time"
log "github.com/sirupsen/logrus"
"sigs.k8s.io/external-dns/endpoint"
)

View File

@ -28,6 +28,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/wait"
"sigs.k8s.io/external-dns/endpoint"
"sigs.k8s.io/external-dns/internal/config"
)
@ -137,6 +138,12 @@ func getProviderSpecificAnnotations(annotations map[string]string) (endpoint.Pro
Name: fmt.Sprintf("aws/%s", attr),
Value: v,
})
} else if strings.HasPrefix(k, "external-dns.alpha.kubernetes.io/scw-") {
attr := strings.TrimPrefix(k, "external-dns.alpha.kubernetes.io/scw-")
providerSpecificAnnotations = append(providerSpecificAnnotations, endpoint.ProviderSpecificProperty{
Name: fmt.Sprintf("scw/%s", attr),
Value: v,
})
}
}
return providerSpecificAnnotations, setIdentifier

View File

@ -29,6 +29,8 @@ import (
log "github.com/sirupsen/logrus"
istioclient "istio.io/client-go/pkg/clientset/versioned"
istioinformers "istio.io/client-go/pkg/informers/externalversions"
networkingv1alpha3informer "istio.io/client-go/pkg/informers/externalversions/networking/v1alpha3"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/wait"
@ -55,6 +57,7 @@ type virtualServiceSource struct {
combineFQDNAnnotation bool
ignoreHostnameAnnotation bool
serviceInformer coreinformers.ServiceInformer
virtualserviceInformer networkingv1alpha3informer.VirtualServiceInformer
}
// NewIstioVirtualServiceSource creates a new virtualServiceSource with the given config.
@ -85,6 +88,8 @@ func NewIstioVirtualServiceSource(
// Set resync period to 0, to prevent processing when nothing has changed
informerFactory := kubeinformers.NewSharedInformerFactoryWithOptions(kubeClient, 0, kubeinformers.WithNamespace(namespace))
serviceInformer := informerFactory.Core().V1().Services()
istioInformerFactory := istioinformers.NewSharedInformerFactory(istioClient, 0)
virtualServiceInformer := istioInformerFactory.Networking().V1alpha3().VirtualServices()
// Add default resource event handlers to properly initialize informer.
serviceInformer.Informer().AddEventHandler(
@ -95,8 +100,17 @@ func NewIstioVirtualServiceSource(
},
)
virtualServiceInformer.Informer().AddEventHandler(
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
log.Debug("virtual service added")
},
},
)
// TODO informer is not explicitly stopped since controller is not passing in its channel.
informerFactory.Start(wait.NeverStop)
istioInformerFactory.Start(wait.NeverStop)
// wait for the local cache to be populated.
err = wait.Poll(time.Second, 60*time.Second, func() (bool, error) {
@ -106,6 +120,13 @@ func NewIstioVirtualServiceSource(
return nil, fmt.Errorf("failed to sync cache: %v", err)
}
err = wait.Poll(time.Second, 60*time.Second, func() (bool, error) {
return virtualServiceInformer.Informer().HasSynced(), nil
})
if err != nil {
return nil, fmt.Errorf("failed to sync cache: %v", err)
}
return &virtualServiceSource{
kubeClient: kubeClient,
istioClient: istioClient,
@ -115,6 +136,7 @@ func NewIstioVirtualServiceSource(
combineFQDNAnnotation: combineFQDNAnnotation,
ignoreHostnameAnnotation: ignoreHostnameAnnotation,
serviceInformer: serviceInformer,
virtualserviceInformer: virtualServiceInformer,
}, nil
}
@ -179,9 +201,23 @@ func (sc *virtualServiceSource) Endpoints(ctx context.Context) ([]*endpoint.Endp
return endpoints, nil
}
// TODO(tariq1890): Implement this once we have evaluated and tested VirtualServiceInformers
// AddEventHandler adds an event handler that should be triggered if the watched Istio VirtualService changes.
func (sc *virtualServiceSource) AddEventHandler(ctx context.Context, handler func()) {
log.Debug("Adding event handler for Istio VirtualService")
sc.virtualserviceInformer.Informer().AddEventHandler(
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
handler()
},
UpdateFunc: func(old interface{}, new interface{}) {
handler()
},
DeleteFunc: func(obj interface{}) {
handler()
},
},
)
}
func (sc *virtualServiceSource) getGateway(ctx context.Context, gatewayStr string, virtualService networkingv1alpha3.VirtualService) *networkingv1alpha3.Gateway {