mirror of
https://github.com/kubernetes-sigs/external-dns.git
synced 2025-08-06 01:26:59 +02:00
Merge branch 'master' of https://github.com/kubernetes-sigs/external-dns into dennisme/minttl-ns1
This commit is contained in:
commit
f69a82f8c6
2
.github/ISSUE_TEMPLATE/-support-request.md
vendored
2
.github/ISSUE_TEMPLATE/-support-request.md
vendored
@ -12,7 +12,7 @@ STOP -- PLEASE READ!
|
||||
|
||||
GitHub is not the right place for support requests.
|
||||
|
||||
If you're looking for help, check our [docs](https://github.com/kubernetes-sigs/external-dns/tree/master/docs).
|
||||
If you're looking for help, check our [docs](https://github.com/kubernetes-sigs/external-dns/tree/HEAD/docs).
|
||||
|
||||
You can also post your question on the [Kubernetes Slack #external-dns](https://kubernetes.slack.com/archives/C771MKDKQ).
|
||||
|
||||
|
3
.github/labeler.yml
vendored
3
.github/labeler.yml
vendored
@ -64,3 +64,6 @@ provider/vinyldns: provider/vinyldns*
|
||||
|
||||
# Add 'provider/vultr' in file which starts with vultr
|
||||
provider/vultr: provider/vultr*
|
||||
|
||||
# Add 'provider/ultradns' in file which starts with ultradns
|
||||
provider/ultradns: provider/ultradns*
|
||||
|
3
.github/pull_request_template.md
vendored
Normal file
3
.github/pull_request_template.md
vendored
Normal file
@ -0,0 +1,3 @@
|
||||
## Checklist
|
||||
|
||||
- [ ] Update changelog in CHANGELOG.md, use section "Unreleased".
|
44
.github/workflows/ci.yml
vendored
Normal file
44
.github/workflows/ci.yml
vendored
Normal file
@ -0,0 +1,44 @@
|
||||
name: Go
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ master ]
|
||||
pull_request:
|
||||
branches: [ master ]
|
||||
|
||||
jobs:
|
||||
|
||||
build:
|
||||
name: Build
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
|
||||
- name: Set up Go 1.x
|
||||
uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: ^1.14
|
||||
id: go
|
||||
|
||||
- name: Check out code into the Go module directory
|
||||
uses: actions/checkout@v2
|
||||
|
||||
- name: Install CI
|
||||
run: |
|
||||
go get -v -t -d ./...
|
||||
if [ -f Gopkg.toml ]; then
|
||||
curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh
|
||||
dep ensure
|
||||
fi
|
||||
|
||||
- name: Lint
|
||||
run: |
|
||||
curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.26.0
|
||||
make lint
|
||||
|
||||
- name: Coverage
|
||||
uses: shogo82148/actions-goveralls@v1
|
||||
|
||||
- name: Test
|
||||
run: make test
|
||||
|
||||
|
@ -1,23 +1,70 @@
|
||||
run:
|
||||
concurrency: 4
|
||||
|
||||
modules-download-mode: readonly
|
||||
|
||||
linters-settings:
|
||||
exhaustive:
|
||||
default-signifies-exhaustive: false
|
||||
goimports:
|
||||
local-prefixes: github.com/kubernetes-sigs/external-dns
|
||||
golint:
|
||||
min-confidence: 0.9
|
||||
|
||||
gocyclo:
|
||||
min-complexity: 15
|
||||
min-confidence: 0.9
|
||||
maligned:
|
||||
suggest-new: true
|
||||
misspell:
|
||||
locale: US
|
||||
|
||||
linters:
|
||||
# please, do not use `enable-all`: it's deprecated and will be removed soon.
|
||||
# inverted configuration with `enable-all` and `disable` is not scalable during updates of golangci-lint
|
||||
disable-all: true
|
||||
enable:
|
||||
- deadcode
|
||||
- depguard
|
||||
- dogsled
|
||||
- gofmt
|
||||
- goimports
|
||||
- golint
|
||||
- goprintffuncname
|
||||
- gosimple
|
||||
- govet
|
||||
- ineffassign
|
||||
- golint
|
||||
- goimports
|
||||
- misspell
|
||||
- unconvert
|
||||
- megacheck
|
||||
- interfacer
|
||||
- misspell
|
||||
- rowserrcheck
|
||||
- staticcheck
|
||||
- structcheck
|
||||
- stylecheck
|
||||
- typecheck
|
||||
- unconvert
|
||||
- unused
|
||||
- varcheck
|
||||
- whitespace
|
||||
|
||||
issues:
|
||||
# Excluding configuration per-path, per-linter, per-text and per-source
|
||||
exclude-rules:
|
||||
- path: _test\.go
|
||||
linters:
|
||||
- deadcode
|
||||
- depguard
|
||||
- dogsled
|
||||
- gofmt
|
||||
- goimports
|
||||
- golint
|
||||
- goprintffuncname
|
||||
- gosimple
|
||||
- govet
|
||||
- ineffassign
|
||||
- interfacer
|
||||
- misspell
|
||||
- nolintlint
|
||||
- rowserrcheck
|
||||
- staticcheck
|
||||
- structcheck
|
||||
- stylecheck
|
||||
- typecheck
|
||||
- unconvert
|
||||
- unused
|
||||
- varcheck
|
||||
- whitespace
|
||||
|
||||
run:
|
||||
skip-files:
|
||||
- endpoint/zz_generated.deepcopy.go
|
||||
|
28
.travis.yml
28
.travis.yml
@ -1,28 +0,0 @@
|
||||
dist: xenial
|
||||
|
||||
os:
|
||||
- linux
|
||||
|
||||
language: go
|
||||
|
||||
go:
|
||||
- "1.14.x"
|
||||
- tip
|
||||
|
||||
matrix:
|
||||
allow_failures:
|
||||
- go: tip
|
||||
|
||||
env:
|
||||
- GOLANGCI_RELEASE="v1.26.0"
|
||||
|
||||
before_install:
|
||||
- GO111MODULE=off go get github.com/mattn/goveralls
|
||||
- GO111MODULE=off go get github.com/lawrencewoodman/roveralls
|
||||
- curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin ${GOLANGCI_RELEASE}
|
||||
|
||||
script:
|
||||
- make test
|
||||
- make lint
|
||||
- travis_wait 20 roveralls
|
||||
- goveralls -coverprofile=roveralls.coverprofile -service=travis-ci
|
80
CHANGELOG.md
80
CHANGELOG.md
@ -1,3 +1,83 @@
|
||||
## Unreleased
|
||||
|
||||
|
||||
## v0.7.3 - 2020-08-05
|
||||
|
||||
- Fix: add serviceaccount name in kustomize deployment (#1689) @jmthvt
|
||||
- Updates Oracle OCI SDK to latest (#1687) @ericrrath
|
||||
- UltraDNS Provider (#1635) @kbhandari
|
||||
- Update apiVersions in docs (#1690) @ddgenome
|
||||
- use the github actions build status badge (#1702) @tariq1890
|
||||
- Upgrade Oracle OCI SDK (#1688) @ericrrath
|
||||
- update dependencies and minor dep tree cleanup (#1692) @tariq1890
|
||||
- Update link for linode cloud manager (#1661) @phillc
|
||||
- Remove occurrences of "master" from the project (#1636) @Raffo
|
||||
- Create pull_request_template (#1662) @njuettner
|
||||
- dependencies: Upgrade all k8s client-go dependent sources to v1.18.X (#1627) @josephglanville
|
||||
- add GitHub Actions (#1657) @Raffo
|
||||
- add new source for istio virtual services (#1607) @tariq1890
|
||||
- use latest Alpine version in ExternalDNS dockerfile (#1655) @tariq1890
|
||||
- Update TTL docs to confirm DNSimple support (#1547) @weppos
|
||||
- rm unused flag param istio-ingressgateways (#1649) @tariq1890
|
||||
- Upgrade istio httpbin from 1.0 to 1.6 version (#1640) @ikovnatskymiacar
|
||||
- Add endpoints to kustomize base (#1638) @Raffo
|
||||
- DigitalOcean: support multiple targets per endpoint (#1595) @tdyas
|
||||
- Vultr : Version bump + changes (#1637) @ddymko
|
||||
- Hetzner DNS service support (#1570) @21h
|
||||
- Add OVH API rate limiting option (Fix #1546) (#1619) @Hugome
|
||||
- Add kustomize base (#1631) @Raffo
|
||||
- increase test timeout to fix intermittent failures of ingress tests (#1612) @tdyas
|
||||
- AWS: change the order of the actions, DELETE before CREATE fixes #1411 (#1555) @OmerKahani
|
||||
- Fix handling of DNS updates for RFC2136 provider. (#1613) @dmayle
|
||||
- digitalocean: increase API page size (#1611) @tdyas
|
||||
- improve linter quality for external-dns (#1618) @njuettner
|
||||
- fix convert int to string bug (#1620) @tariq1890
|
||||
|
||||
## v0.7.2 - 2020-06-03
|
||||
|
||||
- Update blogpost in README (#1610) @vanhumbeecka
|
||||
- Support for AWS Route53 in China (#1603) @greenu
|
||||
- Update Govcloud provider hosted zones (#1592) @clhuang
|
||||
- Fix issue with too large DNS messages (#1590) @dmayle
|
||||
- use the latest linode go version (#1587) @tariq1890
|
||||
- use istio client-go and clean up k8s deps (#1584) @tariq1890
|
||||
- Add owners for cloudflare and coredns providers (#1582) @Raffo
|
||||
- remove some code duplication in gateway source (#1575) @tariq1890
|
||||
- update Contour IngressRoute deps (#1569) @stevesloka
|
||||
- Make tests faster (#1568) @sheerun
|
||||
- Fix scheduling of reconciliation (#1567) @sheerun
|
||||
- fix minor typos in istio gateway source docs (#1566) @tariq1890
|
||||
- Provider structure refactor (#1565) @Raffo
|
||||
- Fix typo in ttl.md (#1564) @rtnpro
|
||||
- Fix goreportcard warnings (#1561) @squat
|
||||
- Use consistent headless service name in example (#1559) @lowkeyliesmyth
|
||||
- Update go versions to 1.14.x that were missed in commit 99cebfcf from PR #1476 (#1554) @stealthybox
|
||||
- Remove duplicate selector from DigitalOcean manifest (#1553) @ggordan
|
||||
- Upgrade DNSimple client and add support for contexts (#1551) @weppos
|
||||
- Upgrade github.com/miekg/dns to v1.1.25 (#1545) @davidcollom
|
||||
- Fix updates in CloudFlare provider (#1542) @sheerun
|
||||
- update readme for latest version (#1539) @elsesiy
|
||||
- Improve Cloudflare tests in preparation to fix other issues (#1537) @sheerun
|
||||
- Allow for custom property comparators (#1536) @sheerun
|
||||
- fix typo (#1535) @tmatias
|
||||
- Bump github.com/pkg/errors from 0.8.1 to 0.9.1 (#1531) @njuettner
|
||||
- Bump github.com/digitalocean/godo from 1.19.0 to 1.34.0 (#1530) @njuettner
|
||||
- Bump github.com/prometheus/client_golang from 1.0.0 to 1.5.1 (#1529) @njuettner
|
||||
- Bump github.com/akamai/AkamaiOPEN-edgegrid-golang from 0.9.10 to 0.9.11 (#1528) @njuettner
|
||||
- Fix RFC2316 Windows Documentation (#1516) @scottd018
|
||||
- remove dependency on kubernetes/kubernetes (#1513) @tariq1890
|
||||
- update akamai openapi dependency (#1511) @tariq1890
|
||||
- Vultr Provider (#1509) @ddymko
|
||||
- Add AWS region ap-east-1(HK) (#1497) @lovemai073
|
||||
- Fix: file coredns.go is not `goimports`-ed (#1496) @njuettner
|
||||
- Allow ZoneIDFilter for Cloudflare (#1494) @james-callahan
|
||||
- update etcd dependency to latest version (#1485) @tariq1890
|
||||
- Support for openshift routes (#1484) @jgrumboe
|
||||
- add --txt-suffix feature (#1483) @jgrumboe
|
||||
- update to go 1.14 (#1476) @jochen42
|
||||
- Multiple A records support for the same FQDN (#1475) @ytsarev
|
||||
- Implement annotation filter for CRD source (#1399) @ytsarev
|
||||
|
||||
## v0.7.1 - 2020-04-01
|
||||
|
||||
- Prometheus metric: timestamp of last successful sync with the DNS provider (#1480) @njuettner
|
||||
|
@ -15,8 +15,6 @@
|
||||
# builder image
|
||||
FROM golang:1.14 as builder
|
||||
|
||||
ARG VERSION
|
||||
|
||||
WORKDIR /sigs.k8s.io/external-dns
|
||||
|
||||
COPY . .
|
||||
@ -25,10 +23,10 @@ RUN go mod vendor && \
|
||||
make build
|
||||
|
||||
# final image
|
||||
FROM alpine:3.11.5
|
||||
FROM alpine:3.12
|
||||
LABEL maintainer="Team Teapot @ Zalando SE <team-teapot@zalando.de>"
|
||||
|
||||
RUN apk add --no-cache ca-certificates && \
|
||||
RUN apk add --update --no-cache ca-certificates && \
|
||||
update-ca-certificates
|
||||
|
||||
COPY --from=builder /sigs.k8s.io/external-dns/build/external-dns /bin/external-dns
|
||||
|
4
Makefile
4
Makefile
@ -31,14 +31,14 @@ cover-html: cover
|
||||
|
||||
# Run all the linters
|
||||
lint:
|
||||
golangci-lint run --timeout=5m ./...
|
||||
golangci-lint run --timeout=15m ./...
|
||||
|
||||
|
||||
# The verify target runs tasks similar to the CI tasks, but without code coverage
|
||||
.PHONY: verify test
|
||||
|
||||
test:
|
||||
go test -v -race $(shell go list ./... | grep -v /vendor/)
|
||||
go test -race ./...
|
||||
|
||||
# The build targets allow to build the binary and docker image
|
||||
.PHONY: build build.docker build.mini
|
||||
|
2
OWNERS
2
OWNERS
@ -1,5 +1,5 @@
|
||||
# See the OWNERS file documentation:
|
||||
# https://github.com/kubernetes/community/blob/master/contributors/guide/owners.md
|
||||
# https://github.com/kubernetes/community/blob/HEAD/contributors/guide/owners.md
|
||||
|
||||
approvers:
|
||||
- hjacobs
|
||||
|
19
README.md
19
README.md
@ -3,8 +3,8 @@
|
||||
</p>
|
||||
|
||||
# ExternalDNS
|
||||
[](https://travis-ci.org/kubernetes-sigs/external-dns)
|
||||
[](https://coveralls.io/github/kubernetes-sigs/external-dns?branch=master)
|
||||
[](https://github.com/kubernetes-sigs/external-dns/actions)
|
||||
[](https://coveralls.io/github/kubernetes-sigs/external-dns)
|
||||
[](https://github.com/kubernetes-sigs/external-dns/releases)
|
||||
[](https://godoc.org/github.com/kubernetes-sigs/external-dns)
|
||||
[](https://goreportcard.com/report/github.com/kubernetes-sigs/external-dns)
|
||||
@ -19,7 +19,7 @@ In a broader sense, ExternalDNS allows you to control DNS records dynamically vi
|
||||
|
||||
The [FAQ](docs/faq.md) contains additional information and addresses several questions about key concepts of ExternalDNS.
|
||||
|
||||
To see ExternalDNS in action, have a look at this [video](https://www.youtube.com/watch?v=9HQ2XgL9YVI) or read this [blogpost](https://medium.com/wearetheledger/deploying-test-environments-with-azure-devops-eks-and-externaldns-67abe647e4e).
|
||||
To see ExternalDNS in action, have a look at this [video](https://www.youtube.com/watch?v=9HQ2XgL9YVI) or read this [blogpost](https://codemine.be/posts/20190125-devops-eks-externaldns/).
|
||||
|
||||
## The Latest Release: v0.7
|
||||
|
||||
@ -31,6 +31,7 @@ ExternalDNS' current release is `v0.7`. This version allows you to keep selected
|
||||
* [CloudFlare](https://www.cloudflare.com/dns)
|
||||
* [RcodeZero](https://www.rcodezero.at/)
|
||||
* [DigitalOcean](https://www.digitalocean.com/products/networking)
|
||||
* [Hetzner](https://hetzner.com/)
|
||||
* [DNSimple](https://dnsimple.com/)
|
||||
* [Infoblox](https://www.infoblox.com/products/dns/)
|
||||
* [Dyn](https://dyn.com/dns/)
|
||||
@ -79,6 +80,7 @@ The following table clarifies the current status of the providers according to t
|
||||
| CloudFlare | Beta | |
|
||||
| RcodeZero | Alpha | |
|
||||
| DigitalOcean | Alpha | |
|
||||
| Hetzner | Alpha | @21h |
|
||||
| DNSimple | Alpha | |
|
||||
| Infoblox | Alpha | @saileshgiri |
|
||||
| Dyn | Alpha | |
|
||||
@ -96,6 +98,7 @@ The following table clarifies the current status of the providers according to t
|
||||
| Akamai FastDNS | Alpha | |
|
||||
| OVH | Alpha | |
|
||||
| Vultr | Alpha | |
|
||||
| UltraDNS | Alpha | |
|
||||
|
||||
## Running ExternalDNS:
|
||||
|
||||
@ -120,6 +123,7 @@ The following tutorials are provided:
|
||||
* [Cloudflare](docs/tutorials/cloudflare.md)
|
||||
* [CoreDNS](docs/tutorials/coredns.md)
|
||||
* [DigitalOcean](docs/tutorials/digitalocean.md)
|
||||
* [Hetzner](docs/tutorials/hetzner.md)
|
||||
* [DNSimple](docs/tutorials/dnsimple.md)
|
||||
* [Dyn](docs/tutorials/dyn.md)
|
||||
* [Exoscale](docs/tutorials/exoscale.md)
|
||||
@ -144,6 +148,7 @@ The following tutorials are provided:
|
||||
* [VinylDNS](docs/tutorials/vinyldns.md)
|
||||
* [OVH](docs/tutorials/ovh.md)
|
||||
* [Vultr](docs/tutorials/vultr.md)
|
||||
* [UltraDNS](docs/tutorials/ultradns.md)
|
||||
|
||||
### Running Locally
|
||||
|
||||
@ -173,7 +178,7 @@ Assuming Go has been setup with module support it can be built simply by running
|
||||
$ make
|
||||
```
|
||||
|
||||
This will create external-dns in the build directory directly from master.
|
||||
This will create external-dns in the build directory directly from the default branch.
|
||||
|
||||
Next, run an application and expose it via a Kubernetes Service:
|
||||
|
||||
@ -273,12 +278,12 @@ Here's a rough outline on what is to come (subject to change):
|
||||
|
||||
### v0.6
|
||||
|
||||
- [ ] Ability to replace Kops' [DNS Controller](https://github.com/kubernetes/kops/tree/master/dns-controller) (This could also directly become `v1.0`)
|
||||
- [ ] Ability to replace Kops' [DNS Controller](https://github.com/kubernetes/kops/tree/HEAD/dns-controller) (This could also directly become `v1.0`)
|
||||
- [x] Support for OVH
|
||||
|
||||
### v1.0
|
||||
|
||||
- [ ] Ability to replace Kops' [DNS Controller](https://github.com/kubernetes/kops/tree/master/dns-controller)
|
||||
- [ ] Ability to replace Kops' [DNS Controller](https://github.com/kubernetes/kops/tree/HEAD/dns-controller)
|
||||
- [x] Ability to replace Zalando's [Mate](https://github.com/linki/mate)
|
||||
- [x] Ability to replace Molecule Software's [route53-kubernetes](https://github.com/wearemolecule/route53-kubernetes)
|
||||
|
||||
@ -319,7 +324,7 @@ For an overview on how to write new Sources and Providers check out [Sources and
|
||||
|
||||
ExternalDNS is an effort to unify the following similar projects in order to bring the Kubernetes community an easy and predictable way of managing DNS records across cloud providers based on their Kubernetes resources:
|
||||
|
||||
* Kops' [DNS Controller](https://github.com/kubernetes/kops/tree/master/dns-controller)
|
||||
* Kops' [DNS Controller](https://github.com/kubernetes/kops/tree/HEAD/dns-controller)
|
||||
* Zalando's [Mate](https://github.com/linki/mate)
|
||||
* Molecule Software's [route53-kubernetes](https://github.com/wearemolecule/route53-kubernetes)
|
||||
|
||||
|
@ -4,7 +4,7 @@
|
||||
# to for triaging and handling of incoming issues.
|
||||
#
|
||||
# The below names agree to abide by the
|
||||
# [Embargo Policy](https://github.com/kubernetes/sig-release/blob/master/security-release-process-documentation/security-release-process.md#embargo-policy)
|
||||
# [Embargo Policy](https://github.com/kubernetes/sig-release/blob/HEAD/security-release-process-documentation/security-release-process.md#embargo-policy)
|
||||
# and will be removed and replaced if they violate that agreement.
|
||||
#
|
||||
# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE
|
||||
|
@ -18,6 +18,7 @@ package controller
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
@ -112,6 +113,10 @@ type Controller struct {
|
||||
Interval time.Duration
|
||||
// The DomainFilter defines which DNS records to keep or exclude
|
||||
DomainFilter endpoint.DomainFilter
|
||||
// The nextRunAt used for throttling and batching reconciliation
|
||||
nextRunAt time.Time
|
||||
// The nextRunAtMux is for atomic updating of nextRunAt
|
||||
nextRunAtMux sync.Mutex
|
||||
}
|
||||
|
||||
// RunOnce runs a single iteration of a reconciliation loop.
|
||||
@ -126,7 +131,7 @@ func (c *Controller) RunOnce(ctx context.Context) error {
|
||||
|
||||
ctx = context.WithValue(ctx, provider.RecordsContextKey, records)
|
||||
|
||||
endpoints, err := c.Source.Endpoints()
|
||||
endpoints, err := c.Source.Endpoints(ctx)
|
||||
if err != nil {
|
||||
sourceErrorsTotal.Inc()
|
||||
deprecatedSourceErrors.Inc()
|
||||
@ -135,10 +140,11 @@ func (c *Controller) RunOnce(ctx context.Context) error {
|
||||
sourceEndpointsTotal.Set(float64(len(endpoints)))
|
||||
|
||||
plan := &plan.Plan{
|
||||
Policies: []plan.Policy{c.Policy},
|
||||
Current: records,
|
||||
Desired: endpoints,
|
||||
DomainFilter: c.DomainFilter,
|
||||
Policies: []plan.Policy{c.Policy},
|
||||
Current: records,
|
||||
Desired: endpoints,
|
||||
DomainFilter: c.DomainFilter,
|
||||
PropertyComparator: c.Registry.PropertyValuesEqual,
|
||||
}
|
||||
|
||||
plan = plan.Calculate()
|
||||
@ -154,18 +160,39 @@ func (c *Controller) RunOnce(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Run runs RunOnce in a loop with a delay until stopChan receives a value.
|
||||
func (c *Controller) Run(ctx context.Context, stopChan <-chan struct{}) {
|
||||
ticker := time.NewTicker(c.Interval)
|
||||
// MinInterval is used as window for batching events
|
||||
const MinInterval = 5 * time.Second
|
||||
|
||||
// RunOnceThrottled makes sure execution happens at most once per interval.
|
||||
func (c *Controller) ScheduleRunOnce(now time.Time) {
|
||||
c.nextRunAtMux.Lock()
|
||||
defer c.nextRunAtMux.Unlock()
|
||||
c.nextRunAt = now.Add(MinInterval)
|
||||
}
|
||||
|
||||
func (c *Controller) ShouldRunOnce(now time.Time) bool {
|
||||
c.nextRunAtMux.Lock()
|
||||
defer c.nextRunAtMux.Unlock()
|
||||
if now.Before(c.nextRunAt) {
|
||||
return false
|
||||
}
|
||||
c.nextRunAt = now.Add(c.Interval)
|
||||
return true
|
||||
}
|
||||
|
||||
// Run runs RunOnce in a loop with a delay until context is canceled
|
||||
func (c *Controller) Run(ctx context.Context) {
|
||||
ticker := time.NewTicker(time.Second)
|
||||
defer ticker.Stop()
|
||||
for {
|
||||
err := c.RunOnce(ctx)
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
if c.ShouldRunOnce(time.Now()) {
|
||||
if err := c.RunOnce(ctx); err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
}
|
||||
select {
|
||||
case <-ticker.C:
|
||||
case <-stopChan:
|
||||
case <-ctx.Done():
|
||||
log.Info("Terminating main controller loop")
|
||||
return
|
||||
}
|
||||
|
@ -35,6 +35,7 @@ import (
|
||||
|
||||
// mockProvider returns mock endpoints and validates changes.
|
||||
type mockProvider struct {
|
||||
provider.BaseProvider
|
||||
RecordsStore []*endpoint.Endpoint
|
||||
ExpectChanges *plan.Changes
|
||||
}
|
||||
@ -153,43 +154,41 @@ func TestRunOnce(t *testing.T) {
|
||||
source.AssertExpectations(t)
|
||||
}
|
||||
|
||||
// TestSourceEventHandler tests that the Controller can use a Source's registered handler as a callback.
|
||||
func TestSourceEventHandler(t *testing.T) {
|
||||
source := new(testutils.MockSource)
|
||||
func TestShouldRunOnce(t *testing.T) {
|
||||
ctrl := &Controller{Interval: 10 * time.Minute}
|
||||
|
||||
handlerCh := make(chan bool)
|
||||
timeoutCh := make(chan bool, 1)
|
||||
stopChan := make(chan struct{}, 1)
|
||||
now := time.Now()
|
||||
|
||||
ctrl := &Controller{
|
||||
Source: source,
|
||||
Registry: nil,
|
||||
Policy: &plan.SyncPolicy{},
|
||||
}
|
||||
// First run of Run loop should execute RunOnce
|
||||
assert.True(t, ctrl.ShouldRunOnce(now))
|
||||
|
||||
// Define and register a simple handler that sends a message to a channel to show it was called.
|
||||
handler := func() error {
|
||||
handlerCh <- true
|
||||
return nil
|
||||
}
|
||||
// Example of preventing handler from being called more than once every 5 seconds.
|
||||
ctrl.Source.AddEventHandler(handler, stopChan, 5*time.Second)
|
||||
// Second run should not
|
||||
assert.False(t, ctrl.ShouldRunOnce(now))
|
||||
|
||||
// Send timeout message after 10 seconds to fail test if handler is not called.
|
||||
go func() {
|
||||
time.Sleep(10 * time.Second)
|
||||
timeoutCh <- true
|
||||
}()
|
||||
now = now.Add(10 * time.Second)
|
||||
// Changes happen in ingresses or services
|
||||
ctrl.ScheduleRunOnce(now)
|
||||
ctrl.ScheduleRunOnce(now)
|
||||
|
||||
// Wait until we either receive a message from handlerCh or timeoutCh channel after 10 seconds.
|
||||
select {
|
||||
case msg := <-handlerCh:
|
||||
assert.True(t, msg)
|
||||
case <-timeoutCh:
|
||||
assert.Fail(t, "timed out waiting for event handler to be called")
|
||||
}
|
||||
// Because we batch changes, ShouldRunOnce returns False at first
|
||||
assert.False(t, ctrl.ShouldRunOnce(now))
|
||||
assert.False(t, ctrl.ShouldRunOnce(now.Add(100*time.Microsecond)))
|
||||
|
||||
close(stopChan)
|
||||
close(handlerCh)
|
||||
close(timeoutCh)
|
||||
// But after MinInterval we should run reconciliation
|
||||
now = now.Add(MinInterval)
|
||||
assert.True(t, ctrl.ShouldRunOnce(now))
|
||||
|
||||
// But just one time
|
||||
assert.False(t, ctrl.ShouldRunOnce(now))
|
||||
|
||||
// We should wait maximum possible time after last reconciliation started
|
||||
now = now.Add(10*time.Minute - time.Second)
|
||||
assert.False(t, ctrl.ShouldRunOnce(now))
|
||||
|
||||
// After exactly Interval it's OK again to reconcile
|
||||
now = now.Add(time.Second)
|
||||
assert.True(t, ctrl.ShouldRunOnce(now))
|
||||
|
||||
// But not two times
|
||||
assert.False(t, ctrl.ShouldRunOnce(now))
|
||||
}
|
||||
|
@ -29,7 +29,7 @@ When the project was proposed (see the [original discussion](https://github.com/
|
||||
|
||||
* Mate - [https://github.com/linki/mate](https://github.com/linki/mate)
|
||||
|
||||
* DNS-controller from kops - [https://github.com/kubernetes/kops/tree/master/dns-controller](https://github.com/kubernetes/kops/tree/master/dns-controller)
|
||||
* DNS-controller from kops - [https://github.com/kubernetes/kops/tree/HEAD/dns-controller](https://github.com/kubernetes/kops/tree/HEAD/dns-controller)
|
||||
|
||||
* Route53-kubernetes - [https://github.com/wearemolecule/route53-kubernetes](https://github.com/wearemolecule/route53-kubernetes)
|
||||
|
||||
@ -135,7 +135,7 @@ The docker registry service is provided as best effort with no sort of SLA and t
|
||||
|
||||
Providing a vanity URL for the docker images was consider a non goal till now, but the community seems to be wanting official images from a GCR domain, similarly to what is available for other parts of official Kubernetes projects.
|
||||
|
||||
ExternalDNS does not follow a specific release cycle. Releases are made often when there are major contributions (i.e. new providers) or important bug fixes. That said, the master is considered stable and can be used as well to build images.
|
||||
ExternalDNS does not follow a specific release cycle. Releases are made often when there are major contributions (i.e. new providers) or important bug fixes. That said, the default branch is considered stable and can be used as well to build images.
|
||||
|
||||
### Risks and Mitigations
|
||||
|
||||
|
@ -4,12 +4,12 @@ CRD source provides a generic mechanism to manage DNS records in your favourite
|
||||
|
||||
### Details
|
||||
|
||||
CRD source watches for a user specified CRD to extract [Endpoints](https://github.com/kubernetes-sigs/external-dns/blob/master/endpoint/endpoint.go) from its `Spec`.
|
||||
CRD source watches for a user specified CRD to extract [Endpoints](https://github.com/kubernetes-sigs/external-dns/blob/HEAD/endpoint/endpoint.go) from its `Spec`.
|
||||
So users need to create such a CRD and register it to the kubernetes cluster and then create new object(s) of the CRD specifying the Endpoints.
|
||||
|
||||
### Registering CRD
|
||||
|
||||
Here is typical example of [CRD API type](https://github.com/kubernetes-sigs/external-dns/blob/master/endpoint/endpoint.go) which provides Endpoints to `CRD source`:
|
||||
Here is typical example of [CRD API type](https://github.com/kubernetes-sigs/external-dns/blob/HEAD/endpoint/endpoint.go) which provides Endpoints to `CRD source`:
|
||||
|
||||
```go
|
||||
type TTL int64
|
||||
@ -100,7 +100,6 @@ Run external-dns in dry-mode to see whether external-dns picks up the DNS record
|
||||
|
||||
```
|
||||
$ build/external-dns --source crd --crd-source-apiversion externaldns.k8s.io/v1alpha1 --crd-source-kind DNSEndpoint --provider inmemory --once --dry-run
|
||||
INFO[0000] config: {Master: KubeConfig: Sources:[crd] Namespace: AnnotationFilter: FQDNTemplate: CombineFQDNAndAnnotation:false Compatibility: PublishInternal:false PublishHostIP:false ConnectorSourceServer:localhost:8080 Provider:inmemory GoogleProject: DomainFilter:[] ZoneIDFilter:[] AWSZoneType: AWSAssumeRole: AWSMaxChangeCount:4000 AWSEvaluateTargetHealth:true AzureConfigFile:/etc/kubernetes/azure.json AzureResourceGroup: CloudflareProxied:false InfobloxGridHost: InfobloxWapiPort:443 InfobloxWapiUsername:admin InfobloxWapiPassword: InfobloxWapiVersion:2.3.1 InfobloxSSLVerify:true DynCustomerName: DynUsername: DynPassword: DynMinTTLSeconds:0 OCIConfigFile:/etc/kubernetes/oci.yaml InMemoryZones:[] PDNSServer:http://localhost:8081 PDNSAPIKey: PDNSTLSEnabled:false TLSCA: TLSClientCert: TLSClientCertKey: Policy:sync Registry:txt TXTOwnerID:default TXTPrefix: Interval:1m0s Once:true DryRun:true LogFormat:text MetricsAddress::7979 LogLevel:info TXTCacheInterval:0s ExoscaleEndpoint:https://community.exoscale.com/documentation/dns/api/ ExoscaleAPIKey: ExoscaleAPISecret: CRDSourceAPIVersion:externaldns.k8s.io/v1alpha1 CRDSourceKind:DNSEndpoint}
|
||||
INFO[0000] running in dry-run mode. No changes to DNS records will be made.
|
||||
INFO[0000] Connected to cluster at https://192.168.99.100:8443
|
||||
INFO[0000] CREATE: foo.bar.com 180 IN A 192.168.99.216
|
||||
|
12
docs/faq.md
12
docs/faq.md
@ -75,13 +75,13 @@ Regarding Ingress, we'll support:
|
||||
* Google's Ingress Controller on GKE that integrates with their Layer 7 load balancers (GLBC)
|
||||
* nginx-ingress-controller v0.9.x with a fronting Service
|
||||
* Zalando's [AWS Ingress controller](https://github.com/zalando-incubator/kube-ingress-aws-controller), based on AWS ALBs and [Skipper](https://github.com/zalando/skipper)
|
||||
* [Traefik](https://github.com/containous/traefik) 1.7 and above, when [`kubernetes.ingressEndpoint`](https://docs.traefik.io/v1.7/configuration/backends/kubernetes/#ingressendpoint) is configured (`kubernetes.ingressEndpoint.useDefaultPublishedService` in the [Helm chart](https://github.com/helm/charts/tree/master/stable/traefik#configuration))
|
||||
* [Traefik](https://github.com/containous/traefik) 1.7 and above, when [`kubernetes.ingressEndpoint`](https://docs.traefik.io/v1.7/configuration/backends/kubernetes/#ingressendpoint) is configured (`kubernetes.ingressEndpoint.useDefaultPublishedService` in the [Helm chart](https://github.com/helm/charts/tree/HEAD/stable/traefik#configuration))
|
||||
|
||||
### Are other Ingress Controllers supported?
|
||||
|
||||
For Ingress objects, ExternalDNS will attempt to discover the target hostname of the relevant Ingress Controller automatically. If you are using an Ingress Controller that is not listed above you may have issues with ExternalDNS not discovering Endpoints and consequently not creating any DNS records. As a workaround, it is possible to force create an Endpoint by manually specifying a target host/IP for the records to be created by setting the annotation `external-dns.alpha.kubernetes.io/target` in the Ingress object.
|
||||
|
||||
Another reason you may want to override the ingress hostname or IP address is if you have an external mechanism for handling failover across ingress endpoints. Possible scenarios for this would include using [keepalived-vip](https://github.com/kubernetes/contrib/tree/master/keepalived-vip) to manage failover faster than DNS TTLs might expire.
|
||||
Another reason you may want to override the ingress hostname or IP address is if you have an external mechanism for handling failover across ingress endpoints. Possible scenarios for this would include using [keepalived-vip](https://github.com/kubernetes/contrib/tree/HEAD/keepalived-vip) to manage failover faster than DNS TTLs might expire.
|
||||
|
||||
Note that if you set the target to a hostname, then a CNAME record will be created. In this case, the hostname specified in the Ingress object's annotation must already exist. (i.e. you have a Service resource for your Ingress Controller with the `external-dns.alpha.kubernetes.io/hostname` annotation set to the same value.)
|
||||
|
||||
@ -89,7 +89,7 @@ Note that if you set the target to a hostname, then a CNAME record will be creat
|
||||
|
||||
ExternalDNS is a joint effort to unify different projects accomplishing the same goals, namely:
|
||||
|
||||
* Kops' [DNS Controller](https://github.com/kubernetes/kops/tree/master/dns-controller)
|
||||
* Kops' [DNS Controller](https://github.com/kubernetes/kops/tree/HEAD/dns-controller)
|
||||
* Zalando's [Mate](https://github.com/linki/mate)
|
||||
* Molecule Software's [route53-kubernetes](https://github.com/wearemolecule/route53-kubernetes)
|
||||
|
||||
@ -205,7 +205,7 @@ $ docker run \
|
||||
-e EXTERNAL_DNS_PROVIDER=google \
|
||||
-e EXTERNAL_DNS_DOMAIN_FILTER=$'foo.com\nbar.com' \
|
||||
registry.opensource.zalan.do/teapot/external-dns:latest
|
||||
time="2017-08-08T14:10:26Z" level=info msg="config: &{Master: KubeConfig: Sources:[service ingress] Namespace: ...
|
||||
time="2017-08-08T14:10:26Z" level=info msg="config: &{APIServerURL: KubeConfig: Sources:[service ingress] Namespace: ...
|
||||
```
|
||||
|
||||
Locally:
|
||||
@ -213,12 +213,12 @@ Locally:
|
||||
```console
|
||||
$ export EXTERNAL_DNS_SOURCE=$'service\ningress'
|
||||
$ external-dns --provider=google
|
||||
INFO[0000] config: &{Master: KubeConfig: Sources:[service ingress] Namespace: ...
|
||||
INFO[0000] config: &{APIServerURL: KubeConfig: Sources:[service ingress] Namespace: ...
|
||||
```
|
||||
|
||||
```
|
||||
$ EXTERNAL_DNS_SOURCE=$'service\ningress' external-dns --provider=google
|
||||
INFO[0000] config: &{Master: KubeConfig: Sources:[service ingress] Namespace: ...
|
||||
INFO[0000] config: &{APIServerURL: KubeConfig: Sources:[service ingress] Namespace: ...
|
||||
```
|
||||
|
||||
In a Kubernetes manifest:
|
||||
|
@ -10,7 +10,7 @@ This document describes the initial design proposal.
|
||||
|
||||
External DNS is purposed to fill the existing gap of creating DNS records for Kubernetes resources. While there exist alternative solutions, this project is meant to be a standard way of managing DNS records for Kubernetes. The current project is a fusion of the following projects and driven by its maintainers:
|
||||
|
||||
1. [Kops DNS Controller](https://github.com/kubernetes/kops/tree/master/dns-controller)
|
||||
1. [Kops DNS Controller](https://github.com/kubernetes/kops/tree/HEAD/dns-controller)
|
||||
2. [Mate](https://github.com/linki/mate)
|
||||
3. [wearemolecule/route53-kubernetes](https://github.com/wearemolecule/route53-kubernetes)
|
||||
|
||||
|
@ -68,7 +68,7 @@ Brief summary of open PRs and what they are trying to address:
|
||||
|
||||
### PRs
|
||||
|
||||
1. https://github.com/kubernetes-sigs/external-dns/pull/243 - first attempt to add support for multiple targets. It is lagging far behind from master tip
|
||||
1. https://github.com/kubernetes-sigs/external-dns/pull/243 - first attempt to add support for multiple targets. It is lagging far behind from tip
|
||||
|
||||
*what it does*: unfinished attempt to extend `Endpoint` struct, for it to allow multiple targets (essentially `target string -> targets []string`)
|
||||
|
||||
@ -78,15 +78,15 @@ Brief summary of open PRs and what they are trying to address:
|
||||
|
||||
*what it does* : attempts to fix issues with `plan` described in `Current Behaviour` section above. Included tests reveal the current problem with `plan`
|
||||
|
||||
*action*: rebase on master and make necessary changes to satisfy requirements listed in this document including back-reference to owning record
|
||||
*action*: rebase on default branch and make necessary changes to satisfy requirements listed in this document including back-reference to owning record
|
||||
|
||||
3. https://github.com/kubernetes-sigs/external-dns/pull/326 - attempt to add multiple target support.
|
||||
|
||||
*what it does*: for each pair `DNS Name` + `Record Type` it aggregates **all** targets from the cluster and passes them to Provider. It adds basic support
|
||||
for DO, Azura, Cloudflare, AWS, GCP, however those are not tested (?). (DNSSimple and Infoblox providers were not updated)
|
||||
for DO, Azure, Cloudflare, AWS, GCP, however those are not tested (?). (DNSSimple and Infoblox providers were not updated)
|
||||
|
||||
*action*: the `plan` logic will probably need to be reworked, however the rest concerning support in Providers and extending `Endpoint` struct can be reused.
|
||||
Rebase on master and add missing pieces. Depends on `2`.
|
||||
Rebase on default branch and add missing pieces. Depends on `2`.
|
||||
|
||||
Related PRs: https://github.com/kubernetes-sigs/external-dns/pull/331/files, https://github.com/kubernetes-sigs/external-dns/pull/347/files - aiming at AWS Route53 weighted records.
|
||||
These PRs should be considered after common agreement about the way to address multi-target support is achieved. Related discussion: https://github.com/kubernetes-sigs/external-dns/issues/196
|
||||
|
@ -6,6 +6,6 @@ Currently we don't release regularly. Whenever we think it makes sense to releas
|
||||
|
||||
## How to release a new image
|
||||
|
||||
When releasing a new version of external-dns, we tag the branch by using **vX.Y.Z** as tag name. This PR includes the updated **CHANGELOG.md** with the latest commits since last tag. As soon as we merge this PR into master, Kubernetes based CI/CD system [Prow](https://prow.k8s.io/?repo=kubernetes-sigs%2Fexternal-dns) will trigger a job to push the image. We're using the Google Container Registry for our Docker images.
|
||||
When releasing a new version of external-dns, we tag the branch by using **vX.Y.Z** as tag name. This PR includes the updated **CHANGELOG.md** with the latest commits since last tag. As soon as we merge this PR into the default branch, Kubernetes based CI/CD system [Prow](https://prow.k8s.io/?repo=kubernetes-sigs%2Fexternal-dns) will trigger a job to push the image. We're using the Google Container Registry for our Docker images.
|
||||
|
||||
The job itself looks at external-dns `cloudbuild.yaml` and executes the given steps. Inside it runs `make release.staging` which is basically only a `docker build` and `docker push`. The docker image is pushed `gcr.io/k8s-staging-external-dns/external-dns`, which is only a staging image and shouldn't be used. Promoting the official image we need to create another PR in [k8s.io](https://github.com/kubernetes/k8s.io), e.g. https://github.com/kubernetes/k8s.io/pull/540 by taking the current staging image using sha256.
|
||||
|
@ -39,12 +39,14 @@ Providers
|
||||
- [ ] Azure
|
||||
- [ ] Cloudflare
|
||||
- [x] DigitalOcean
|
||||
- [x] DNSimple
|
||||
- [x] Google
|
||||
- [ ] InMemory
|
||||
- [x] Linode
|
||||
- [x] TransIP
|
||||
- [x] RFC2136
|
||||
- [x] Vultr
|
||||
- [x] UltraDNS
|
||||
|
||||
PRs welcome!
|
||||
|
||||
@ -60,6 +62,9 @@ This value is a constant in the provider code.
|
||||
The DigitalOcean Provider overrides the value to 300s when the TTL is 0.
|
||||
This value is a constant in the provider code.
|
||||
|
||||
### DNSimple Provider
|
||||
The DNSimple Provider default TTL is used when the TTL is 0. The default TTL is 3600s.
|
||||
|
||||
### Google Provider
|
||||
Previously with the Google Provider, TTL's were hard-coded to 300s.
|
||||
For safety, the Google Provider overrides the value to 300s when the TTL is 0.
|
||||
@ -76,3 +81,6 @@ The TransIP Provider minimal TTL is used when the TTL is 0. The minimal TTL is 6
|
||||
|
||||
### Vultr Provider
|
||||
The Vultr provider minimal TTL is used when the TTL is 0. The default is 1 hour.
|
||||
|
||||
### UltraDNS
|
||||
The UltraDNS provider minimal TTL is used when the TTL is not provided. The default TTL is the account-level default TTL, if defined; otherwise it is 24 hours.
|
||||
|
@ -97,7 +97,7 @@ rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["services","endpoints","pods"]
|
||||
verbs: ["get","watch","list"]
|
||||
- apiGroups: ["extensions"]
|
||||
- apiGroups: ["extensions","networking.k8s.io"]
|
||||
resources: ["ingresses"]
|
||||
verbs: ["get","watch","list"]
|
||||
- apiGroups: [""]
|
||||
|
@ -110,7 +110,7 @@ this Ingress object will only be fronting one backend Service, we might instead
|
||||
create the following:
|
||||
|
||||
```yaml
|
||||
apiVersion: extensions/v1beta1
|
||||
apiVersion: networking.k8s.io/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
annotations:
|
||||
@ -145,7 +145,7 @@ and one AAAA record) for each hostname associated with the Ingress object.
|
||||
Example:
|
||||
|
||||
```yaml
|
||||
apiVersion: extensions/v1beta1
|
||||
apiVersion: networking.k8s.io/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
annotations:
|
||||
|
@ -149,7 +149,7 @@ rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["services","endpoints","pods"]
|
||||
verbs: ["get","watch","list"]
|
||||
- apiGroups: ["extensions"]
|
||||
- apiGroups: ["extensions","networking.k8s.io"]
|
||||
resources: ["ingresses"]
|
||||
verbs: ["get","watch","list"]
|
||||
- apiGroups: [""]
|
||||
|
@ -110,7 +110,7 @@ rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["services","endpoints","pods"]
|
||||
verbs: ["get","watch","list"]
|
||||
- apiGroups: ["extensions"]
|
||||
- apiGroups: ["extensions","networking.k8s.io"]
|
||||
resources: ["ingresses"]
|
||||
verbs: ["get","watch","list"]
|
||||
- apiGroups: [""]
|
||||
|
@ -60,7 +60,7 @@ kiam or kube2iam.
|
||||
### kiam
|
||||
|
||||
If you're using [kiam](https://github.com/uswitch/kiam), follow the
|
||||
[instructions](https://github.com/uswitch/kiam/blob/master/docs/IAM.md) for
|
||||
[instructions](https://github.com/uswitch/kiam/blob/HEAD/docs/IAM.md) for
|
||||
creating the IAM role.
|
||||
|
||||
### kube2iam
|
||||
@ -174,7 +174,7 @@ rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["services","endpoints","pods"]
|
||||
verbs: ["get","watch","list"]
|
||||
- apiGroups: ["extensions"]
|
||||
- apiGroups: ["extensions","networking.k8s.io"]
|
||||
resources: ["ingresses"]
|
||||
verbs: ["get","watch","list"]
|
||||
- apiGroups: [""]
|
||||
|
@ -19,7 +19,7 @@ Therefore, please see the subsequent prerequisites.
|
||||
|
||||
Helm is used to deploy the ingress controller.
|
||||
|
||||
We employ the popular chart [stable/nginx-ingress](https://github.com/helm/charts/tree/master/stable/nginx-ingress).
|
||||
We employ the popular chart [stable/nginx-ingress](https://github.com/helm/charts/tree/HEAD/stable/nginx-ingress).
|
||||
|
||||
```
|
||||
$ helm install stable/nginx-ingress \
|
||||
@ -150,11 +150,14 @@ The credentials of the service principal are provided to ExternalDNS as environm
|
||||
|
||||
### Manifest (for clusters without RBAC enabled)
|
||||
```yaml
|
||||
apiVersion: extensions/v1beta1
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: externaldns
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: externaldns
|
||||
strategy:
|
||||
type: Recreate
|
||||
template:
|
||||
@ -196,7 +199,7 @@ rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["services","endpoints","pods"]
|
||||
verbs: ["get","watch","list"]
|
||||
- apiGroups: ["extensions"]
|
||||
- apiGroups: ["extensions","networking.k8s.io"]
|
||||
resources: ["ingresses"]
|
||||
verbs: ["get","watch","list"]
|
||||
- apiGroups: [""]
|
||||
@ -216,11 +219,14 @@ subjects:
|
||||
name: externaldns
|
||||
namespace: default
|
||||
---
|
||||
apiVersion: extensions/v1beta1
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: externaldns
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: externaldns
|
||||
strategy:
|
||||
type: Recreate
|
||||
template:
|
||||
@ -267,7 +273,7 @@ rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["services","endpoints","pods"]
|
||||
verbs: ["get","watch","list"]
|
||||
- apiGroups: ["extensions"]
|
||||
- apiGroups: ["extensions","networking.k8s.io"]
|
||||
resources: ["ingresses"]
|
||||
verbs: ["get","watch","list"]
|
||||
---
|
||||
@ -283,11 +289,14 @@ subjects:
|
||||
- kind: ServiceAccount
|
||||
name: externaldns
|
||||
---
|
||||
apiVersion: extensions/v1beta1
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: externaldns
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: externaldns
|
||||
strategy:
|
||||
type: Recreate
|
||||
template:
|
||||
@ -326,11 +335,14 @@ $ kubectl create -f externaldns.yaml
|
||||
Create a service file called 'nginx.yaml' with the following contents:
|
||||
|
||||
```yaml
|
||||
apiVersion: extensions/v1beta1
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: nginx
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: nginx
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
@ -356,7 +368,7 @@ spec:
|
||||
type: ClusterIP
|
||||
|
||||
---
|
||||
apiVersion: extensions/v1beta1
|
||||
apiVersion: networking.k8s.io/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: nginx
|
||||
|
@ -7,7 +7,7 @@ Make sure to use **>=0.5.7** version of ExternalDNS for this tutorial.
|
||||
|
||||
This tutorial uses [Azure CLI 2.0](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli) for all
|
||||
Azure commands and assumes that the Kubernetes cluster was created via Azure Container Services and `kubectl` commands
|
||||
are being run on an orchestration master.
|
||||
are being run on an orchestration node.
|
||||
|
||||
## Creating an Azure DNS zone
|
||||
|
||||
@ -167,7 +167,7 @@ Ensure that your nginx-ingress deployment has the following arg: added to it:
|
||||
- --publish-service=namespace/nginx-ingress-controller-svcname
|
||||
```
|
||||
|
||||
For more details see here: [nginx-ingress external-dns](https://github.com/kubernetes-sigs/external-dns/blob/master/docs/faq.md#why-is-externaldns-only-adding-a-single-ip-address-in-route-53-on-aws-when-using-the-nginx-ingress-controller-how-do-i-get-it-to-use-the-fqdn-of-the-elb-assigned-to-my-nginx-ingress-controller-service-instead)
|
||||
For more details see here: [nginx-ingress external-dns](https://github.com/kubernetes-sigs/external-dns/blob/HEAD/docs/faq.md#why-is-externaldns-only-adding-a-single-ip-address-in-route-53-on-aws-when-using-the-nginx-ingress-controller-how-do-i-get-it-to-use-the-fqdn-of-the-elb-assigned-to-my-nginx-ingress-controller-service-instead)
|
||||
|
||||
Connect your `kubectl` client to the cluster you want to test ExternalDNS with.
|
||||
Then apply one of the following manifests file to deploy ExternalDNS.
|
||||
@ -223,7 +223,7 @@ rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["services","endpoints","pods"]
|
||||
verbs: ["get","watch","list"]
|
||||
- apiGroups: ["extensions"]
|
||||
- apiGroups: ["extensions","networking.k8s.io"]
|
||||
resources: ["ingresses"]
|
||||
verbs: ["get","watch","list"]
|
||||
- apiGroups: [""]
|
||||
@ -297,7 +297,7 @@ rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["services","endpoints","pods"]
|
||||
verbs: ["get","watch","list"]
|
||||
- apiGroups: ["extensions"]
|
||||
- apiGroups: ["extensions","networking.k8s.io"]
|
||||
resources: ["ingresses"]
|
||||
verbs: ["get","watch","list"]
|
||||
---
|
||||
|
@ -77,7 +77,7 @@ rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["services","endpoints","pods"]
|
||||
verbs: ["get","watch","list"]
|
||||
- apiGroups: ["extensions"]
|
||||
- apiGroups: ["extensions","networking.k8s.io"]
|
||||
resources: ["ingresses"]
|
||||
verbs: ["get","watch","list"]
|
||||
- apiGroups: [""]
|
||||
|
@ -50,7 +50,7 @@ rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["services","endpoints","pods"]
|
||||
verbs: ["get","watch","list"]
|
||||
- apiGroups: ["extensions"]
|
||||
- apiGroups: ["extensions","networking.k8s.io"]
|
||||
resources: ["ingresses"]
|
||||
verbs: ["get","watch","list"]
|
||||
- apiGroups: [""]
|
||||
@ -107,7 +107,7 @@ spec:
|
||||
|
||||
### Verify External DNS works (IngressRoute example)
|
||||
The following instructions are based on the
|
||||
[Contour example workload](https://github.com/heptio/contour/blob/master/examples/example-workload/kuard-ingressroute.yaml).
|
||||
[Contour example workload](https://github.com/heptio/contour/blob/HEAD/examples/example-workload/kuard-ingressroute.yaml).
|
||||
|
||||
#### Install a sample service
|
||||
```bash
|
||||
|
@ -24,13 +24,13 @@ helm install stable/etcd-operator --name my-etcd-op
|
||||
```
|
||||
etcd cluster is installed with example yaml from etcd operator website.
|
||||
```
|
||||
kubectl apply -f https://raw.githubusercontent.com/coreos/etcd-operator/master/example/example-etcd-cluster.yaml
|
||||
kubectl apply -f https://raw.githubusercontent.com/coreos/etcd-operator/HEAD/example/example-etcd-cluster.yaml
|
||||
```
|
||||
|
||||
### Installing CoreDNS
|
||||
In order to make CoreDNS work with etcd backend, values.yaml of the chart should be changed with corresponding configurations.
|
||||
```
|
||||
wget https://raw.githubusercontent.com/helm/charts/master/stable/coredns/values.yaml
|
||||
wget https://raw.githubusercontent.com/helm/charts/HEAD/stable/coredns/values.yaml
|
||||
```
|
||||
|
||||
You need to edit/patch the file with below diff
|
||||
@ -130,7 +130,7 @@ rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["services","endpoints","pods"]
|
||||
verbs: ["get","watch","list"]
|
||||
- apiGroups: ["extensions"]
|
||||
- apiGroups: ["extensions","networking.k8s.io"]
|
||||
resources: ["ingresses"]
|
||||
verbs: ["get","watch","list"]
|
||||
- apiGroups: [""]
|
||||
|
@ -98,7 +98,7 @@ rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["pods"]
|
||||
verbs: ["get","watch","list"]
|
||||
- apiGroups: ["extensions"]
|
||||
- apiGroups: ["extensions","networking.k8s.io"]
|
||||
resources: ["ingresses"]
|
||||
verbs: ["get","watch","list"]
|
||||
- apiGroups: [""]
|
||||
@ -118,11 +118,14 @@ subjects:
|
||||
name: external-dns
|
||||
namespace: default
|
||||
---
|
||||
apiVersion: extensions/v1beta1
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: external-dns
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: external-dns
|
||||
strategy:
|
||||
type: Recreate
|
||||
template:
|
||||
|
@ -68,7 +68,7 @@ rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["services","endpoints","pods"]
|
||||
verbs: ["get","watch","list"]
|
||||
- apiGroups: ["extensions"]
|
||||
- apiGroups: ["extensions","networking.k8s.io"]
|
||||
resources: ["ingresses"]
|
||||
verbs: ["get","watch","list"]
|
||||
- apiGroups: [""]
|
||||
@ -189,3 +189,13 @@ Now that we have verified that ExternalDNS will automatically manage DigitalOcea
|
||||
$ kubectl delete service -f nginx.yaml
|
||||
$ kubectl delete service -f externaldns.yaml
|
||||
```
|
||||
|
||||
## Advanced Usage
|
||||
|
||||
### API Page Size
|
||||
|
||||
If you have a large number of domains and/or records within a domain, you may encounter API
|
||||
rate limiting because of the number of API calls that external-dns must make to the DigitalOcean API to retrieve
|
||||
the current DNS configuration during every reconciliation loop. If this is the case, use the
|
||||
`--digitalocean-api-page-size` option to increase the size of the pages used when querying the DigitalOcean API.
|
||||
(Note: external-dns uses a default of 50.)
|
||||
|
@ -62,7 +62,7 @@ rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["services","endpoints","pods"]
|
||||
verbs: ["get","watch","list"]
|
||||
- apiGroups: ["extensions"]
|
||||
- apiGroups: ["extensions","networking.k8s.io"]
|
||||
resources: ["ingresses"]
|
||||
verbs: ["get","watch","list"]
|
||||
- apiGroups: [""]
|
||||
|
@ -74,7 +74,7 @@ rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["services","endpoints","pods"]
|
||||
verbs: ["get","watch","list"]
|
||||
- apiGroups: ["extensions"]
|
||||
- apiGroups: ["extensions","networking.k8s.io"]
|
||||
resources: ["ingresses"]
|
||||
verbs: ["get","watch","list"]
|
||||
- apiGroups: [""]
|
||||
|
@ -118,7 +118,7 @@ rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["services","endpoints","pods"]
|
||||
verbs: ["get","watch","list"]
|
||||
- apiGroups: ["extensions"]
|
||||
- apiGroups: ["extensions","networking.k8s.io"]
|
||||
resources: ["ingresses"]
|
||||
verbs: ["get","watch","list"]
|
||||
- apiGroups: [""]
|
||||
|
191
docs/tutorials/hetzner.md
Normal file
191
docs/tutorials/hetzner.md
Normal file
@ -0,0 +1,191 @@
|
||||
# Setting up ExternalDNS for Services on Hetzner DNS
|
||||
|
||||
This tutorial describes how to setup ExternalDNS for usage within a Kubernetes cluster using Hetzner DNS.
|
||||
|
||||
Make sure to use **>=0.7.3** version of ExternalDNS for this tutorial.
|
||||
|
||||
## Creating a Hetzner DNS zone
|
||||
|
||||
If you want to learn how to use Hetzner's DNS service, read the following tutorial series:
|
||||
|
||||
[An Introduction to Managing DNS](https://wiki.hetzner.de/index.php/DNS_Overview), and [Add a new DNS zone](https://wiki.hetzner.de/index.php/Getting_started).
|
||||
|
||||
Create a new DNS zone where you want to create your records in. Let's use `example.com` as an example here.
|
||||
|
||||
## Creating Hetzner Credentials
|
||||
|
||||
Generate a new personal token by going to [the API settings](https://dns.hetzner.com/settings/api-token) or follow [Generating an API access token](https://wiki.hetzner.de/index.php/API_access_token) if you need more information. Give the token a name and choose read and write access. The token needs to be passed to ExternalDNS so make a note of it for later use.
|
||||
|
||||
The environment variable `HETZNER_TOKEN` will be needed to run ExternalDNS with Hetzner.
|
||||
|
||||
## Deploy ExternalDNS
|
||||
|
||||
Connect your `kubectl` client to the cluster you want to test ExternalDNS with.
|
||||
Then apply one of the following manifests file to deploy ExternalDNS.
|
||||
|
||||
### Manifest (for clusters without RBAC enabled)
|
||||
```yaml
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: external-dns
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: external-dns
|
||||
strategy:
|
||||
type: Recreate
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: external-dns
|
||||
spec:
|
||||
containers:
|
||||
- name: external-dns
|
||||
image: eu.gcr.io/k8s-artifacts-prod/external-dns/external-dns:v0.7.3
|
||||
args:
|
||||
- --source=service # ingress is also possible
|
||||
- --domain-filter=example.com # (optional) limit to only example.com domains; change to match the zone created above.
|
||||
- --provider=hetzner
|
||||
env:
|
||||
- name: HETZNER_TOKEN
|
||||
value: "YOUR_HETZNER_DNS_API_KEY"
|
||||
```
|
||||
|
||||
### Manifest (for clusters with RBAC enabled)
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: external-dns
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: external-dns
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["services","endpoints","pods"]
|
||||
verbs: ["get","watch","list"]
|
||||
- apiGroups: ["extensions","networking.k8s.io"]
|
||||
resources: ["ingresses"]
|
||||
verbs: ["get","watch","list"]
|
||||
- apiGroups: [""]
|
||||
resources: ["nodes"]
|
||||
verbs: ["list","watch"]
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: external-dns-viewer
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: external-dns
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: external-dns
|
||||
namespace: default
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: external-dns
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: external-dns
|
||||
strategy:
|
||||
type: Recreate
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: external-dns
|
||||
spec:
|
||||
serviceAccountName: external-dns
|
||||
containers:
|
||||
- name: external-dns
|
||||
image: registry.opensource.zalan.do/teapot/external-dns:latest
|
||||
args:
|
||||
- --source=service # ingress is also possible
|
||||
- --domain-filter=example.com # (optional) limit to only example.com domains; change to match the zone created above.
|
||||
- --provider=hetzner
|
||||
env:
|
||||
- name: HETZNER_TOKEN
|
||||
value: "YOUR_HETZNER_DNS_API_KEY"
|
||||
```
|
||||
|
||||
|
||||
## Deploying an Nginx Service
|
||||
|
||||
Create a service file called 'nginx.yaml' with the following contents:
|
||||
|
||||
```yaml
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: nginx
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: nginx
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: nginx
|
||||
spec:
|
||||
containers:
|
||||
- image: nginx
|
||||
name: nginx
|
||||
ports:
|
||||
- containerPort: 80
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: nginx
|
||||
annotations:
|
||||
external-dns.alpha.kubernetes.io/hostname: my-app.example.com
|
||||
spec:
|
||||
selector:
|
||||
app: nginx
|
||||
type: LoadBalancer
|
||||
ports:
|
||||
- protocol: TCP
|
||||
port: 80
|
||||
targetPort: 80
|
||||
```
|
||||
|
||||
Note the annotation on the service; use the same hostname as the Hetzner DNS zone created above.
|
||||
|
||||
ExternalDNS uses this annotation to determine what services should be registered with DNS. Removing the annotation will cause ExternalDNS to remove the corresponding DNS records.
|
||||
|
||||
Create the deployment and service:
|
||||
|
||||
```console
|
||||
$ kubectl create -f nginx.yaml
|
||||
```
|
||||
|
||||
Depending on where you run your service, it can take a little while for your cloud provider to create an external IP for the service.
|
||||
|
||||
Once the service has an external IP assigned, ExternalDNS will notice the new service IP address and synchronize the Hetzner DNS records.
|
||||
|
||||
## Verifying Hetzner DNS records
|
||||
|
||||
Check your [Hetzner DNS UI](https://dns.hetzner.com/) to view the records for your Hetzner DNS zone.
|
||||
|
||||
Click on the zone created above (or on the zone for your domain, if you used a different one).
|
||||
|
||||
This should show the external IP address of the service as the A record for your domain.
|
||||
|
||||
## Cleanup
|
||||
|
||||
Now that we have verified that ExternalDNS will automatically manage Hetzner DNS records, we can delete the tutorial's example:
|
||||
|
||||
```
|
||||
$ kubectl delete service -f nginx.yaml
|
||||
$ kubectl delete service -f externaldns.yaml
|
||||
```
|
@ -9,7 +9,7 @@ The main use cases that inspired this feature is the necessity for fixed address
|
||||
|
||||
We will go through a small example of deploying a simple Kafka with use of a headless service.
|
||||
|
||||
### Exernal DNS
|
||||
### External DNS
|
||||
|
||||
A simple deploy could look like this:
|
||||
### Manifest (for clusters without RBAC enabled)
|
||||
@ -17,7 +17,7 @@ A simple deploy could look like this:
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: exeternal-dns
|
||||
name: external-dns
|
||||
spec:
|
||||
strategy:
|
||||
type: Recreate
|
||||
@ -58,7 +58,7 @@ rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["services","endpoints","pods"]
|
||||
verbs: ["get","watch","list"]
|
||||
- apiGroups: ["extensions"]
|
||||
- apiGroups: ["extensions","networking.k8s.io"]
|
||||
resources: ["ingresses"]
|
||||
verbs: ["get","watch","list"]
|
||||
- apiGroups: [""]
|
||||
@ -81,7 +81,7 @@ subjects:
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: exeternal-dns
|
||||
name: external-dns
|
||||
spec:
|
||||
strategy:
|
||||
type: Recreate
|
||||
@ -111,7 +111,7 @@ spec:
|
||||
|
||||
### Kafka Stateful Set
|
||||
|
||||
First lets deploy a Kafka Stateful set, a simple example(a lot of stuff is missing) with a headless service called `kafka-hsvc`
|
||||
First lets deploy a Kafka Stateful set, a simple example(a lot of stuff is missing) with a headless service called `ksvc`
|
||||
|
||||
```yaml
|
||||
apiVersion: apps/v1beta1
|
||||
@ -155,7 +155,7 @@ spec:
|
||||
requests:
|
||||
storage: 500Gi
|
||||
```
|
||||
Very important here, is to set the `hostport`(only works if the PodSecurityPolicy allows it)! and in case your app requires an actual hostname inside the container, unlike Kafka, which can advertise on another address, you have to set the hostname yourself.
|
||||
Very important here, is to set the `hostPort`(only works if the PodSecurityPolicy allows it)! and in case your app requires an actual hostname inside the container, unlike Kafka, which can advertise on another address, you have to set the hostname yourself.
|
||||
|
||||
### Headless Service
|
||||
|
||||
|
@ -111,7 +111,7 @@ rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["services","endpoints","pods"]
|
||||
verbs: ["get","watch","list"]
|
||||
- apiGroups: ["extensions"]
|
||||
- apiGroups: ["extensions","networking.k8s.io"]
|
||||
resources: ["ingresses"]
|
||||
verbs: ["get","watch","list"]
|
||||
- apiGroups: [""]
|
||||
|
@ -1,4 +1,4 @@
|
||||
# Configuring ExternalDNS to use the Istio Gateway Source
|
||||
# Configuring ExternalDNS to use the Istio Gateway and/or Istio Virtual Service Source
|
||||
This tutorial describes how to configure ExternalDNS to use the Istio Gateway source.
|
||||
It is meant to supplement the other provider-specific setup tutorials.
|
||||
|
||||
@ -32,7 +32,8 @@ spec:
|
||||
args:
|
||||
- --source=service
|
||||
- --source=ingress
|
||||
- --source=istio-gateway
|
||||
- --source=istio-gateway # choose one
|
||||
- --source=istio-virtualservice # or both
|
||||
- --domain-filter=external-dns-test.my-org.com # will make ExternalDNS see only the hosted zones matching provided domain, omit to process all available hosted zones
|
||||
- --provider=aws
|
||||
- --policy=upsert-only # would prevent ExternalDNS from deleting any records, omit to enable full synchronization
|
||||
@ -56,14 +57,14 @@ rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["services","endpoints","pods"]
|
||||
verbs: ["get","watch","list"]
|
||||
- apiGroups: ["extensions"]
|
||||
- apiGroups: ["extensions","networking.k8s.io"]
|
||||
resources: ["ingresses"]
|
||||
verbs: ["get","watch","list"]
|
||||
- apiGroups: [""]
|
||||
resources: ["nodes"]
|
||||
verbs: ["list"]
|
||||
- apiGroups: ["networking.istio.io"]
|
||||
resources: ["gateways"]
|
||||
resources: ["gateways", "virtualservices"]
|
||||
verbs: ["get","watch","list"]
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
@ -102,6 +103,7 @@ spec:
|
||||
- --source=service
|
||||
- --source=ingress
|
||||
- --source=istio-gateway
|
||||
- --source=istio-virtualservice
|
||||
- --domain-filter=external-dns-test.my-org.com # will make ExternalDNS see only the hosted zones matching provided domain, omit to process all available hosted zones
|
||||
- --provider=aws
|
||||
- --policy=upsert-only # would prevent ExternalDNS from deleting any records, omit to enable full synchronization
|
||||
@ -130,7 +132,7 @@ kubectl patch clusterrole external-dns --type='json' \
|
||||
-p='[{"op": "add", "path": "/rules/4", "value": { "apiGroups": [ "networking.istio.io"], "resources": ["gateways"],"verbs": ["get", "watch", "list" ]} }]'
|
||||
```
|
||||
|
||||
### Verify ExternalDNS works (Gateway example)
|
||||
### Verify that Istio Gateway/VirtualService Source works
|
||||
|
||||
Follow the [Istio ingress traffic tutorial](https://istio.io/docs/tasks/traffic-management/ingress/)
|
||||
to deploy a sample service that will be exposed outside of the service mesh.
|
||||
@ -139,15 +141,16 @@ The following are relevant snippets from that tutorial.
|
||||
#### Install a sample service
|
||||
With automatic sidecar injection:
|
||||
```bash
|
||||
$ kubectl apply -f https://raw.githubusercontent.com/istio/istio/release-1.0/samples/httpbin/httpbin.yaml
|
||||
$ kubectl apply -f https://raw.githubusercontent.com/istio/istio/release-1.6/samples/httpbin/httpbin.yaml
|
||||
```
|
||||
|
||||
Otherwise:
|
||||
```bash
|
||||
$ kubectl apply -f <(istioctl kube-inject -f https://raw.githubusercontent.com/istio/istio/release-1.0/samples/httpbin/httpbin.yaml)
|
||||
$ kubectl apply -f <(istioctl kube-inject -f https://raw.githubusercontent.com/istio/istio/release-1.6/samples/httpbin/httpbin.yaml)
|
||||
```
|
||||
|
||||
#### Create an Istio Gateway:
|
||||
#### Using a Gateway as a source
|
||||
##### Create an Istio Gateway:
|
||||
```bash
|
||||
$ cat <<EOF | kubectl apply -f -
|
||||
apiVersion: networking.istio.io/v1alpha3
|
||||
@ -163,11 +166,11 @@ spec:
|
||||
name: http
|
||||
protocol: HTTP
|
||||
hosts:
|
||||
- "httpbin.example.com"
|
||||
- "httpbin.example.com" # this is used by external-dns to extract DNS names
|
||||
EOF
|
||||
```
|
||||
|
||||
#### Configure routes for traffic entering via the Gateway:
|
||||
##### Configure routes for traffic entering via the Gateway:
|
||||
```bash
|
||||
$ cat <<EOF | kubectl apply -f -
|
||||
apiVersion: networking.istio.io/v1alpha3
|
||||
@ -178,7 +181,56 @@ spec:
|
||||
hosts:
|
||||
- "httpbin.example.com"
|
||||
gateways:
|
||||
- httpbin-gateway
|
||||
- istio-system/httpbin-gateway
|
||||
http:
|
||||
- match:
|
||||
- uri:
|
||||
prefix: /status
|
||||
- uri:
|
||||
prefix: /delay
|
||||
route:
|
||||
- destination:
|
||||
port:
|
||||
number: 8000
|
||||
host: httpbin
|
||||
EOF
|
||||
```
|
||||
|
||||
#### Using a VirtualService as a source
|
||||
|
||||
##### Create an Istio Gateway:
|
||||
```bash
|
||||
$ cat <<EOF | kubectl apply -f -
|
||||
apiVersion: networking.istio.io/v1alpha3
|
||||
kind: Gateway
|
||||
metadata:
|
||||
name: httpbin-gateway
|
||||
namespace: istio-system
|
||||
spec:
|
||||
selector:
|
||||
istio: ingressgateway # use Istio default gateway implementation
|
||||
servers:
|
||||
- port:
|
||||
number: 80
|
||||
name: http
|
||||
protocol: HTTP
|
||||
hosts:
|
||||
- "*"
|
||||
EOF
|
||||
```
|
||||
|
||||
##### Configure routes for traffic entering via the Gateway:
|
||||
```bash
|
||||
$ cat <<EOF | kubectl apply -f -
|
||||
apiVersion: networking.istio.io/v1alpha3
|
||||
kind: VirtualService
|
||||
metadata:
|
||||
name: httpbin
|
||||
spec:
|
||||
hosts:
|
||||
- "httpbin.example.com" # this is used by external-dns to extract DNS names
|
||||
gateways:
|
||||
- istio-system/httpbin-gateway
|
||||
http:
|
||||
- match:
|
||||
- uri:
|
||||
|
@ -15,7 +15,7 @@ this is not required.
|
||||
For help setting up the Kubernetes Ingress AWS Controller, that can
|
||||
create ALBs and NLBs, follow the [Setup Guide][2].
|
||||
|
||||
[2]: https://github.com/zalando-incubator/kube-ingress-aws-controller/tree/master/deploy
|
||||
[2]: https://github.com/zalando-incubator/kube-ingress-aws-controller/tree/HEAD/deploy
|
||||
|
||||
|
||||
### Optional RouteGroup
|
||||
@ -26,7 +26,7 @@ create ALBs and NLBs, follow the [Setup Guide][2].
|
||||
First, you have to apply the RouteGroup CRD to your cluster:
|
||||
|
||||
```
|
||||
kubectl apply -f https://github.com/zalando/skipper/blob/master/dataclients/kubernetes/deploy/apply/routegroups_crd.yaml
|
||||
kubectl apply -f https://github.com/zalando/skipper/blob/HEAD/dataclients/kubernetes/deploy/apply/routegroups_crd.yaml
|
||||
```
|
||||
|
||||
You have to grant all controllers: [Skipper][4],
|
||||
@ -43,6 +43,7 @@ metadata:
|
||||
rules:
|
||||
- apiGroups:
|
||||
- extensions
|
||||
- networking.k8s.io
|
||||
resources:
|
||||
- ingresses
|
||||
verbs:
|
||||
@ -51,6 +52,7 @@ rules:
|
||||
- watch
|
||||
- apiGroups:
|
||||
- extensions
|
||||
- networking.k8s.io
|
||||
resources:
|
||||
- ingresses/status
|
||||
verbs:
|
||||
@ -170,7 +172,7 @@ this Ingress object will only be fronting one backend Service, we might instead
|
||||
create the following:
|
||||
|
||||
```yaml
|
||||
apiVersion: extensions/v1beta1
|
||||
apiVersion: networking.k8s.io/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
annotations:
|
||||
@ -203,7 +205,7 @@ and one AAAA record) for each hostname associated with the Ingress object.
|
||||
Example:
|
||||
|
||||
```yaml
|
||||
apiVersion: extensions/v1beta1
|
||||
apiVersion: networking.k8s.io/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
annotations:
|
||||
@ -237,7 +239,7 @@ set to `nlb` then ExternalDNS will create an NLB instead of an ALB.
|
||||
Example:
|
||||
|
||||
```yaml
|
||||
apiVersion: extensions/v1beta1
|
||||
apiVersion: networking.k8s.io/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
annotations:
|
||||
|
@ -67,7 +67,7 @@ rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["services","endpoints","pods"]
|
||||
verbs: ["get","watch","list"]
|
||||
- apiGroups: ["extensions"]
|
||||
- apiGroups: ["extensions","networking.k8s.io"]
|
||||
resources: ["ingresses"]
|
||||
verbs: ["get","watch","list"]
|
||||
- apiGroups: [""]
|
||||
@ -171,7 +171,7 @@ Once the service has an external IP assigned, ExternalDNS will notice the new se
|
||||
|
||||
## Verifying Linode DNS records
|
||||
|
||||
Check your [Linode UI](https://manager.linode.com/dns) to view the records for your Linode DNS zone.
|
||||
Check your [Linode UI](https://cloud.linode.com/domains) to view the records for your Linode DNS zone.
|
||||
|
||||
Click on the zone for the one created above if a different domain was used.
|
||||
|
||||
|
@ -224,7 +224,7 @@ rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["services","endpoints","pods"]
|
||||
verbs: ["get","watch","list"]
|
||||
- apiGroups: ["extensions"]
|
||||
- apiGroups: ["extensions","networking.k8s.io"]
|
||||
resources: ["ingresses"]
|
||||
verbs: ["get","watch","list"]
|
||||
- apiGroups: [""]
|
||||
|
@ -87,7 +87,7 @@ rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["services","endpoints","pods"]
|
||||
verbs: ["get","watch","list"]
|
||||
- apiGroups: ["extensions"]
|
||||
- apiGroups: ["extensions","networking.k8s.io"]
|
||||
resources: ["ingresses"]
|
||||
verbs: ["get","watch","list"]
|
||||
- apiGroups: [""]
|
||||
|
@ -51,7 +51,7 @@ rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["services","endpoints","pods"]
|
||||
verbs: ["get","watch","list"]
|
||||
- apiGroups: ["extensions"]
|
||||
- apiGroups: ["extensions","networking.k8s.io"]
|
||||
resources: ["ingresses"]
|
||||
verbs: ["get","watch","list"]
|
||||
- apiGroups: [""]
|
||||
@ -105,7 +105,7 @@ spec:
|
||||
|
||||
### Verify External DNS works (OpenShift Route example)
|
||||
The following instructions are based on the
|
||||
[Hello Openshift](https://github.com/openshift/origin/tree/master/examples/hello-openshift).
|
||||
[Hello Openshift](https://github.com/openshift/origin/tree/HEAD/examples/hello-openshift).
|
||||
|
||||
#### Install a sample service and expose it
|
||||
```bash
|
||||
|
@ -53,7 +53,7 @@ rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["services","endpoints","pods"]
|
||||
verbs: ["get","watch","list"]
|
||||
- apiGroups: ["extensions"]
|
||||
- apiGroups: ["extensions","networking.k8s.io"]
|
||||
resources: ["ingresses"]
|
||||
verbs: ["get","watch","list"]
|
||||
- apiGroups: [""]
|
||||
|
@ -58,7 +58,7 @@ curl -XPOST -H "X-Ovh-Application: <ApplicationKey>" -H "Content-type: applicati
|
||||
"path": "/domain/zone/*/refresh"
|
||||
}
|
||||
],
|
||||
"redirection":"https://github.com/kubernetes-sigs/external-dns/blob/master/docs/tutorials/ovh.md#creating-ovh-credentials"
|
||||
"redirection":"https://github.com/kubernetes-sigs/external-dns/blob/HEAD/docs/tutorials/ovh.md#creating-ovh-credentials"
|
||||
}'
|
||||
```
|
||||
|
||||
@ -119,7 +119,7 @@ rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["pods"]
|
||||
verbs: ["get","watch","list"]
|
||||
- apiGroups: ["extensions"]
|
||||
- apiGroups: ["extensions","networking.k8s.io"]
|
||||
resources: ["ingresses"]
|
||||
verbs: ["get","watch","list"]
|
||||
- apiGroups: [""]
|
||||
|
@ -78,7 +78,7 @@ rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["services","endpoints","pods"]
|
||||
verbs: ["get","watch","list"]
|
||||
- apiGroups: ["extensions"]
|
||||
- apiGroups: ["extensions","networking.k8s.io"]
|
||||
resources: ["ingresses"]
|
||||
verbs: ["get","watch","list"]
|
||||
- apiGroups: [""]
|
||||
|
@ -82,7 +82,7 @@ rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["services","endpoints","pods"]
|
||||
verbs: ["get","watch","list"]
|
||||
- apiGroups: ["extensions"]
|
||||
- apiGroups: ["extensions","networking.k8s.io"]
|
||||
resources: ["ingresses"]
|
||||
verbs: ["get","watch","list"]
|
||||
- apiGroups: [""]
|
||||
|
@ -78,7 +78,7 @@ rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["services","endpoints","pods"]
|
||||
verbs: ["get","watch","list"]
|
||||
- apiGroups: ["extensions"]
|
||||
- apiGroups: ["extensions","networking.k8s.io"]
|
||||
resources: ["ingresses"]
|
||||
verbs: ["get","watch","list"]
|
||||
- apiGroups: [""]
|
||||
|
@ -152,6 +152,7 @@ rules:
|
||||
- list
|
||||
- apiGroups:
|
||||
- extensions
|
||||
- networking.k8s.io
|
||||
resources:
|
||||
- ingresses
|
||||
verbs:
|
||||
|
@ -69,7 +69,7 @@ rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["services","endpoints","pods"]
|
||||
verbs: ["get","watch","list"]
|
||||
- apiGroups: ["extensions"]
|
||||
- apiGroups: ["extensions","networking.k8s.io"]
|
||||
resources: ["ingresses"]
|
||||
verbs: ["get","watch","list"]
|
||||
- apiGroups: [""]
|
||||
|
620
docs/tutorials/ultradns.md
Normal file
620
docs/tutorials/ultradns.md
Normal file
@ -0,0 +1,620 @@
|
||||
# Setting up ExternalDNS for Services on UltraDNS
|
||||
|
||||
This tutorial describes how to setup ExternalDNS for usage within a Kubernetes cluster using UltraDNS.
|
||||
|
||||
For this tutorial, please make sure that you are using a version **> 0.7.2** of ExternalDNS.
|
||||
|
||||
## Managing DNS with UltraDNS
|
||||
|
||||
If you would like to read-up on the UltraDNS service, you can find additional details here: [Introduction to UltraDNS](https://docs.ultradns.neustar)
|
||||
|
||||
Before proceeding, please create a new DNS Zone that you will create your records in for this tutorial process. For the examples in this tutorial, we will be using `example.com` as our Zone.
|
||||
|
||||
## Setting Up UltraDNS Credentials
|
||||
|
||||
The following environment variables will be needed to run ExternalDNS with UltraDNS.
|
||||
|
||||
`ULTRADNS_USERNAME`, `ULTRADNS_PASSWORD`, and `ULTRADNS_BASEURL`
|
||||
`ULTRADNS_ACCOUNTNAME` (optional).
|
||||
|
||||
## Deploying ExternalDNS
|
||||
|
||||
Connect your `kubectl` client to the cluster you want to test ExternalDNS with.
|
||||
Then, apply one of the following manifests file to deploy ExternalDNS.
|
||||
|
||||
- Note: We are assuming the zone is already present within UltraDNS.
|
||||
- Note: While creating CNAMEs as target endpoints, the `--txt-prefix` option is mandatory.
|
||||
### Manifest (for clusters without RBAC enabled)
|
||||
|
||||
```yaml
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: external-dns
|
||||
spec:
|
||||
strategy:
|
||||
type: Recreate
|
||||
selector:
|
||||
matchLabels:
|
||||
app: external-dns
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: external-dns
|
||||
spec:
|
||||
containers:
|
||||
- name: external-dns
|
||||
image: registry.opensource.zalan.do/teapot/external-dns:latest
|
||||
args:
|
||||
- --source=service
|
||||
- --source=ingress # ingress is also possible
|
||||
        - --domain-filter=example.com # (Recommended) Using this filter minimizes the time needed to propagate changes, as there are fewer zones to examine.
|
||||
- --provider=ultradns
|
||||
- --txt-prefix=txt-
|
||||
env:
|
||||
- name: ULTRADNS_USERNAME
|
||||
value: ""
|
||||
        - name: ULTRADNS_PASSWORD # The password is required to be BASE64 encoded.
|
||||
value: ""
|
||||
- name: ULTRADNS_BASEURL
|
||||
value: "https://api.ultradns.com/"
|
||||
- name: ULTRADNS_ACCOUNTNAME
|
||||
value: ""
|
||||
```
|
||||
|
||||
### Manifest (for clusters with RBAC enabled)
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: external-dns
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: external-dns
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["services","endpoints","pods"]
|
||||
verbs: ["get","watch","list"]
|
||||
- apiGroups: ["extensions"]
|
||||
resources: ["ingresses"]
|
||||
verbs: ["get","watch","list"]
|
||||
- apiGroups: [""]
|
||||
resources: ["nodes"]
|
||||
verbs: ["list","watch"]
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: external-dns-viewer
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: external-dns
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: external-dns
|
||||
namespace: default
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: external-dns
|
||||
spec:
|
||||
strategy:
|
||||
type: Recreate
|
||||
selector:
|
||||
matchLabels:
|
||||
app: external-dns
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: external-dns
|
||||
spec:
|
||||
serviceAccountName: external-dns
|
||||
containers:
|
||||
- name: external-dns
|
||||
image: registry.opensource.zalan.do/teapot/external-dns:latest
|
||||
args:
|
||||
- --source=service
|
||||
- --source=ingress
|
||||
        - --domain-filter=example.com # (Recommended) Using this filter minimizes the time needed to propagate changes, as there are fewer zones to examine.
|
||||
- --provider=ultradns
|
||||
- --txt-prefix=txt-
|
||||
env:
|
||||
- name: ULTRADNS_USERNAME
|
||||
value: ""
|
||||
        - name: ULTRADNS_PASSWORD # The password is required to be BASE64 encoded.
|
||||
value: ""
|
||||
- name: ULTRADNS_BASEURL
|
||||
value: "https://api.ultradns.com/"
|
||||
- name: ULTRADNS_ACCOUNTNAME
|
||||
value: ""
|
||||
```
|
||||
|
||||
## Deploying an Nginx Service
|
||||
|
||||
Create a service file called 'nginx.yaml' with the following contents:
|
||||
|
||||
```yaml
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: nginx
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: nginx
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: nginx
|
||||
spec:
|
||||
containers:
|
||||
- image: nginx
|
||||
name: nginx
|
||||
ports:
|
||||
- containerPort: 80
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: nginx
|
||||
annotations:
|
||||
external-dns.alpha.kubernetes.io/hostname: my-app.example.com.
|
||||
spec:
|
||||
selector:
|
||||
app: nginx
|
||||
type: LoadBalancer
|
||||
ports:
|
||||
- protocol: TCP
|
||||
port: 80
|
||||
targetPort: 80
|
||||
```
|
||||
|
||||
Please note the annotation on the service. Use the same hostname as the UltraDNS zone created above.
|
||||
|
||||
ExternalDNS uses this annotation to determine what services should be registered with DNS. Removing the annotation will cause ExternalDNS to remove the corresponding DNS records.
|
||||
|
||||
## Creating the Deployment and Service:
|
||||
|
||||
```console
|
||||
$ kubectl create -f nginx.yaml
|
||||
$ kubectl create -f external-dns.yaml
|
||||
```
|
||||
|
||||
Depending on where you run your service from, it can take a few minutes for your cloud provider to create an external IP for the service.
|
||||
|
||||
Once the service has an external IP assigned, ExternalDNS will notice the new service IP address and will synchronize the UltraDNS records.
|
||||
|
||||
## Verifying UltraDNS Records
|
||||
|
||||
Please verify on the [UltraDNS UI](https://portal.ultradns.neustar) that the records are created under the zone "example.com".
|
||||
|
||||
For more information on UltraDNS UI, refer to (https://docs.ultradns.neustar/mspuserguide.html).
|
||||
|
||||
Select the zone that was created above (or select the appropriate zone if a different zone was used.)
|
||||
|
||||
The external IP address will be displayed as a CNAME record for your zone.
|
||||
|
||||
## Cleaning Up the Deployment and Service
|
||||
|
||||
Now that we have verified that ExternalDNS will automatically manage your UltraDNS records, you can delete the example resources that you created in this tutorial:
|
||||
|
||||
```
|
||||
$ kubectl delete -f nginx.yaml
$ kubectl delete -f external-dns.yaml
|
||||
```
|
||||
## Examples to Manage your Records
|
||||
### Creating Multiple A Records Target
|
||||
- First, you want to create a service file called 'apple-banana-echo.yaml'
|
||||
```yaml
|
||||
---
|
||||
kind: Pod
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: example-app
|
||||
labels:
|
||||
app: apple
|
||||
spec:
|
||||
containers:
|
||||
- name: example-app
|
||||
image: hashicorp/http-echo
|
||||
args:
|
||||
- "-text=apple"
|
||||
---
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: example-service
|
||||
spec:
|
||||
selector:
|
||||
app: apple
|
||||
ports:
|
||||
- port: 5678 # Default port for image
|
||||
```
|
||||
- Then, create service file called 'expose-apple-banana-app.yaml' to expose the services. For more information to deploy ingress controller, refer to (https://kubernetes.github.io/ingress-nginx/deploy/)
|
||||
```yaml
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: example-ingress
|
||||
annotations:
|
||||
ingress.kubernetes.io/rewrite-target: /
|
||||
ingress.kubernetes.io/scheme: internet-facing
|
||||
external-dns.alpha.kubernetes.io/hostname: apple.example.com.
|
||||
external-dns.alpha.kubernetes.io/target: 10.10.10.1,10.10.10.23
|
||||
spec:
|
||||
rules:
|
||||
- http:
|
||||
paths:
|
||||
- path: /apple
|
||||
backend:
|
||||
serviceName: example-service
|
||||
servicePort: 5678
|
||||
```
|
||||
- Then, create the deployment and service:
|
||||
```console
|
||||
$ kubectl create -f apple-banana-echo.yaml
|
||||
$ kubectl create -f expose-apple-banana-app.yaml
|
||||
$ kubectl create -f external-dns.yaml
|
||||
```
|
||||
- Depending on where you run your service from, it can take a few minutes for your cloud provider to create an external IP for the service.
|
||||
- Please verify on the [UltraDNS UI](https://portal.ultradns.neustar) that the records have been created under the zone "example.com".
|
||||
- Finally, you will need to clean up the deployment and service. Please verify on the UI afterwards that the records have been deleted from the zone "example.com":
|
||||
```console
|
||||
$ kubectl delete -f apple-banana-echo.yaml
|
||||
$ kubectl delete -f expose-apple-banana-app.yaml
|
||||
$ kubectl delete -f external-dns.yaml
|
||||
```
|
||||
### Creating CNAME Record
|
||||
- Please note that prior to deploying the external-dns service, you will need to add the option `--txt-prefix=txt-` to external-dns.yaml. If this is not provided, your records will not be created.
|
||||
- First, create a service file called 'apple-banana-echo.yaml'
|
||||
- _Config File Example – kubernetes cluster is on-premise not on cloud_
|
||||
```yaml
|
||||
---
|
||||
kind: Pod
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: example-app
|
||||
labels:
|
||||
app: apple
|
||||
spec:
|
||||
containers:
|
||||
- name: example-app
|
||||
image: hashicorp/http-echo
|
||||
args:
|
||||
- "-text=apple"
|
||||
---
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: example-service
|
||||
spec:
|
||||
selector:
|
||||
app: apple
|
||||
ports:
|
||||
- port: 5678 # Default port for image
|
||||
---
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: example-ingress
|
||||
annotations:
|
||||
ingress.kubernetes.io/rewrite-target: /
|
||||
ingress.kubernetes.io/scheme: internet-facing
|
||||
external-dns.alpha.kubernetes.io/hostname: apple.example.com.
|
||||
external-dns.alpha.kubernetes.io/target: apple.cname.com.
|
||||
spec:
|
||||
rules:
|
||||
- http:
|
||||
paths:
|
||||
- path: /apple
|
||||
backend:
|
||||
serviceName: example-service
|
||||
servicePort: 5678
|
||||
```
|
||||
- _Config File Example – Kubernetes cluster service from different cloud vendors_
|
||||
```yaml
|
||||
---
|
||||
kind: Pod
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: example-app
|
||||
labels:
|
||||
app: apple
|
||||
spec:
|
||||
containers:
|
||||
- name: example-app
|
||||
image: hashicorp/http-echo
|
||||
args:
|
||||
- "-text=apple"
|
||||
---
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: example-service
|
||||
annotations:
|
||||
external-dns.alpha.kubernetes.io/hostname: my-app.example.com.
|
||||
spec:
|
||||
selector:
|
||||
app: apple
|
||||
type: LoadBalancer
|
||||
ports:
|
||||
- protocol: TCP
|
||||
port: 5678
|
||||
targetPort: 5678
|
||||
```
|
||||
- Then, create the deployment and service:
|
||||
```console
|
||||
$ kubectl create -f apple-banana-echo.yaml
|
||||
$ kubectl create -f external-dns.yaml
|
||||
```
|
||||
- Depending on where you run your service from, it can take a few minutes for your cloud provider to create an external IP for the service.
|
||||
- Please verify on the [UltraDNS UI](https://portal.ultradns.neustar), that the records have been created under the zone "example.com".
|
||||
- Finally, you will need to clean up the deployment and service. Please verify on the UI afterwards that the records have been deleted from the zone "example.com":
|
||||
```console
|
||||
$ kubectl delete -f apple-banana-echo.yaml
|
||||
$ kubectl delete -f external-dns.yaml
|
||||
```
|
||||
### Creating Multiple Types Of Records
|
||||
- Please note that prior to deploying the external-dns service, you will need to add the option `--txt-prefix=txt-` to external-dns.yaml. Since you will also be creating a CNAME record, your records will not be created if this option is not provided.
|
||||
- First, create a service file called 'apple-banana-echo.yaml'
|
||||
- _Config File Example – kubernetes cluster is on-premise not on cloud_
|
||||
```yaml
|
||||
---
|
||||
kind: Pod
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: example-app
|
||||
labels:
|
||||
app: apple
|
||||
spec:
|
||||
containers:
|
||||
- name: example-app
|
||||
image: hashicorp/http-echo
|
||||
args:
|
||||
- "-text=apple"
|
||||
---
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: example-service
|
||||
spec:
|
||||
selector:
|
||||
app: apple
|
||||
ports:
|
||||
- port: 5678 # Default port for image
|
||||
---
|
||||
kind: Pod
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: example-app1
|
||||
labels:
|
||||
app: apple1
|
||||
spec:
|
||||
containers:
|
||||
- name: example-app1
|
||||
image: hashicorp/http-echo
|
||||
args:
|
||||
- "-text=apple"
|
||||
---
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: example-service1
|
||||
spec:
|
||||
selector:
|
||||
app: apple1
|
||||
ports:
|
||||
- port: 5679 # Default port for image
|
||||
---
|
||||
kind: Pod
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: example-app2
|
||||
labels:
|
||||
app: apple2
|
||||
spec:
|
||||
containers:
|
||||
- name: example-app2
|
||||
image: hashicorp/http-echo
|
||||
args:
|
||||
- "-text=apple"
|
||||
---
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: example-service2
|
||||
spec:
|
||||
selector:
|
||||
app: apple2
|
||||
ports:
|
||||
- port: 5680 # Default port for image
|
||||
---
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: example-ingress
|
||||
annotations:
|
||||
ingress.kubernetes.io/rewrite-target: /
|
||||
ingress.kubernetes.io/scheme: internet-facing
|
||||
external-dns.alpha.kubernetes.io/hostname: apple.example.com.
|
||||
external-dns.alpha.kubernetes.io/target: apple.cname.com.
|
||||
spec:
|
||||
rules:
|
||||
- http:
|
||||
paths:
|
||||
- path: /apple
|
||||
backend:
|
||||
serviceName: example-service
|
||||
servicePort: 5678
|
||||
---
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: example-ingress1
|
||||
annotations:
|
||||
ingress.kubernetes.io/rewrite-target: /
|
||||
ingress.kubernetes.io/scheme: internet-facing
|
||||
external-dns.alpha.kubernetes.io/hostname: apple-banana.example.com.
|
||||
external-dns.alpha.kubernetes.io/target: 10.10.10.3
|
||||
spec:
|
||||
rules:
|
||||
- http:
|
||||
paths:
|
||||
- path: /apple
|
||||
backend:
|
||||
serviceName: example-service1
|
||||
servicePort: 5679
|
||||
---
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: example-ingress2
|
||||
annotations:
|
||||
ingress.kubernetes.io/rewrite-target: /
|
||||
ingress.kubernetes.io/scheme: internet-facing
|
||||
external-dns.alpha.kubernetes.io/hostname: banana.example.com.
|
||||
external-dns.alpha.kubernetes.io/target: 10.10.10.3,10.10.10.20
|
||||
spec:
|
||||
rules:
|
||||
- http:
|
||||
paths:
|
||||
- path: /apple
|
||||
backend:
|
||||
serviceName: example-service2
|
||||
servicePort: 5680
|
||||
```
|
||||
- _Config File Example – Kubernetes cluster service from different cloud vendors_
|
||||
```yaml
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: nginx
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: nginx
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: nginx
|
||||
spec:
|
||||
containers:
|
||||
- image: nginx
|
||||
name: nginx
|
||||
ports:
|
||||
- containerPort: 80
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: nginx
|
||||
annotations:
|
||||
external-dns.alpha.kubernetes.io/hostname: my-app.example.com.
|
||||
spec:
|
||||
selector:
|
||||
app: nginx
|
||||
type: LoadBalancer
|
||||
ports:
|
||||
- protocol: TCP
|
||||
port: 80
|
||||
targetPort: 80
|
||||
---
|
||||
kind: Pod
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: example-app
|
||||
labels:
|
||||
app: apple
|
||||
spec:
|
||||
containers:
|
||||
- name: example-app
|
||||
image: hashicorp/http-echo
|
||||
args:
|
||||
- "-text=apple"
|
||||
---
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: example-service
|
||||
spec:
|
||||
selector:
|
||||
app: apple
|
||||
ports:
|
||||
- port: 5678 # Default port for image
|
||||
---
|
||||
kind: Pod
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: example-app1
|
||||
labels:
|
||||
app: apple1
|
||||
spec:
|
||||
containers:
|
||||
- name: example-app1
|
||||
image: hashicorp/http-echo
|
||||
args:
|
||||
- "-text=apple"
|
||||
---
|
||||
  kind: Service
  apiVersion: v1
|
||||
metadata:
|
||||
name: example-service1
|
||||
spec:
|
||||
selector:
|
||||
app: apple1
|
||||
ports:
|
||||
- port: 5679 # Default port for image
|
||||
---
|
||||
  apiVersion: extensions/v1beta1
  kind: Ingress
|
||||
metadata:
|
||||
name: example-ingress
|
||||
annotations:
|
||||
ingress.kubernetes.io/rewrite-target: /
|
||||
ingress.kubernetes.io/scheme: internet-facing
|
||||
external-dns.alpha.kubernetes.io/hostname: apple.example.com.
|
||||
external-dns.alpha.kubernetes.io/target: 10.10.10.3,10.10.10.25
|
||||
spec:
|
||||
rules:
|
||||
- http:
|
||||
paths:
|
||||
- path: /apple
|
||||
backend:
|
||||
serviceName: example-service
|
||||
servicePort: 5678
|
||||
---
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: example-ingress1
|
||||
annotations:
|
||||
ingress.kubernetes.io/rewrite-target: /
|
||||
ingress.kubernetes.io/scheme: internet-facing
|
||||
external-dns.alpha.kubernetes.io/hostname: apple-banana.example.com.
|
||||
external-dns.alpha.kubernetes.io/target: 10.10.10.3
|
||||
spec:
|
||||
rules:
|
||||
- http:
|
||||
paths:
|
||||
- path: /apple
|
||||
backend:
|
||||
serviceName: example-service1
|
||||
servicePort: 5679
|
||||
```
|
||||
- Then, create the deployment and service:
|
||||
```console
|
||||
$ kubectl create -f apple-banana-echo.yaml
|
||||
$ kubectl create -f external-dns.yaml
|
||||
```
|
||||
- Depending on where you run your service from, it can take a few minutes for your cloud provider to create an external IP for the service.
|
||||
- Please verify on the [UltraDNS UI](https://portal.ultradns.neustar), that the records have been created under the zone "example.com".
|
||||
- Finally, you will need to clean up the deployment and service. Please verify on the UI afterwards that the records have been deleted from the zone "example.com":
|
||||
```console
|
||||
$ kubectl delete -f apple-banana-echo.yaml
|
||||
$ kubectl delete -f external-dns.yaml
```
|
@ -99,7 +99,7 @@ rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["services","endpoints","pods"]
|
||||
verbs: ["get","watch","list"]
|
||||
- apiGroups: ["extensions"]
|
||||
- apiGroups: ["extensions","networking.k8s.io"]
|
||||
resources: ["ingresses"]
|
||||
verbs: ["get","watch","list"]
|
||||
- apiGroups: [""]
|
||||
|
@ -68,7 +68,7 @@ rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["services","endpoints","pods"]
|
||||
verbs: ["get","watch","list"]
|
||||
- apiGroups: ["extensions"]
|
||||
- apiGroups: ["extensions","networking.k8s.io"]
|
||||
resources: ["ingresses"]
|
||||
verbs: ["get","watch","list"]
|
||||
- apiGroups: [""]
|
||||
|
84
go.mod
84
go.mod
@ -3,67 +3,77 @@ module sigs.k8s.io/external-dns
|
||||
go 1.14
|
||||
|
||||
require (
|
||||
cloud.google.com/go v0.44.3
|
||||
cloud.google.com/go v0.50.0
|
||||
git.blindage.org/21h/hcloud-dns v0.0.0-20200525170043-def10a4a28e0
|
||||
github.com/Azure/azure-sdk-for-go v36.0.0+incompatible
|
||||
github.com/Azure/go-autorest/autorest v0.9.0
|
||||
github.com/Azure/go-autorest/autorest/adal v0.6.0
|
||||
github.com/Azure/go-autorest/autorest v0.9.4
|
||||
github.com/Azure/go-autorest/autorest/adal v0.8.3
|
||||
github.com/Azure/go-autorest/autorest/azure/auth v0.0.0-00010101000000-000000000000
|
||||
github.com/Azure/go-autorest/autorest/to v0.3.0
|
||||
github.com/akamai/AkamaiOPEN-edgegrid-golang v0.9.11
|
||||
github.com/alecthomas/assert v0.0.0-20170929043011-405dbfeb8e38 // indirect
|
||||
github.com/alecthomas/colour v0.1.0 // indirect
|
||||
github.com/alecthomas/kingpin v2.2.5+incompatible
|
||||
github.com/alecthomas/repr v0.0.0-20181024024818-d37bc2a10ba1 // indirect
|
||||
github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20180828111155-cad214d7d71f
|
||||
github.com/aws/aws-sdk-go v1.27.4
|
||||
github.com/alecthomas/repr v0.0.0-20200325044227-4184120f674c // indirect
|
||||
github.com/aliyun/alibaba-cloud-sdk-go v1.61.357
|
||||
github.com/aws/aws-sdk-go v1.31.4
|
||||
github.com/cenkalti/backoff v2.2.1+incompatible
|
||||
github.com/cloudflare/cloudflare-go v0.10.1
|
||||
github.com/cloudfoundry-community/go-cfclient v0.0.0-20190201205600-f136f9222381
|
||||
github.com/denverdino/aliyungo v0.0.0-20180815121905-69560d9530f5
|
||||
github.com/digitalocean/godo v1.34.0
|
||||
github.com/dnaeon/go-vcr v1.0.1 // indirect
|
||||
github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba
|
||||
github.com/digitalocean/godo v1.36.0
|
||||
github.com/dnsimple/dnsimple-go v0.60.0
|
||||
github.com/exoscale/egoscale v0.18.1
|
||||
github.com/fatih/structs v1.1.0 // indirect
|
||||
github.com/ffledgling/pdns-go v0.0.0-20180219074714-524e7daccd99
|
||||
github.com/go-resty/resty v1.8.0 // indirect
|
||||
github.com/gobs/pretty v0.0.0-20180724170744-09732c25a95b // indirect
|
||||
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 // indirect
|
||||
github.com/golang/sync v0.0.0-20180314180146-1d60e4601c6f
|
||||
github.com/google/go-cmp v0.4.1
|
||||
github.com/gophercloud/gophercloud v0.1.0
|
||||
github.com/gorilla/mux v1.7.4 // indirect
|
||||
github.com/hashicorp/golang-lru v0.5.4 // indirect
|
||||
github.com/heptio/contour v0.15.0
|
||||
github.com/infobloxopen/infoblox-go-client v0.0.0-20180606155407-61dc5f9b0a65
|
||||
github.com/linki/instrumented_http v0.2.0
|
||||
github.com/linode/linodego v0.3.0
|
||||
github.com/mattn/go-isatty v0.0.11 // indirect
|
||||
github.com/linode/linodego v0.19.0
|
||||
github.com/maxatome/go-testdeep v1.4.0
|
||||
github.com/miekg/dns v1.1.25
|
||||
github.com/miekg/dns v1.1.30
|
||||
github.com/nesv/go-dynect v0.6.0
|
||||
github.com/nic-at/rc0go v1.1.0
|
||||
github.com/openshift/api v0.0.0-20190322043348-8741ff068a47
|
||||
github.com/openshift/client-go v3.9.0+incompatible
|
||||
github.com/oracle/oci-go-sdk v1.8.0
|
||||
github.com/nic-at/rc0go v1.1.1
|
||||
github.com/openshift/api v0.0.0-20200605231317-fb2a6ca106ae
|
||||
github.com/openshift/client-go v0.0.0-20200608144219-584632b8fc73
|
||||
github.com/oracle/oci-go-sdk v21.4.0+incompatible
|
||||
github.com/ovh/go-ovh v0.0.0-20181109152953-ba5adb4cf014
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/prometheus/client_golang v1.0.0
|
||||
github.com/projectcontour/contour v1.5.0
|
||||
github.com/prometheus/client_golang v1.7.1
|
||||
github.com/sanyu/dynectsoap v0.0.0-20181203081243-b83de5edc4e0
|
||||
github.com/satori/go.uuid v1.2.0 // indirect
|
||||
github.com/sergi/go-diff v1.1.0 // indirect
|
||||
github.com/sirupsen/logrus v1.4.2
|
||||
github.com/sirupsen/logrus v1.6.0
|
||||
github.com/smartystreets/assertions v1.0.1 // indirect
|
||||
github.com/smartystreets/go-aws-auth v0.0.0-20180515143844-0c1422d1fdb9 // indirect
|
||||
github.com/smartystreets/gunit v1.1.1 // indirect
|
||||
github.com/stretchr/testify v1.4.0
|
||||
github.com/sirupsen/logrus v1.4.2
|
||||
github.com/smartystreets/gunit v1.3.4 // indirect
|
||||
github.com/stretchr/testify v1.5.1
|
||||
github.com/terra-farm/udnssdk v1.3.5 // indirect
|
||||
github.com/transip/gotransip v5.8.2+incompatible
|
||||
github.com/ultradns/ultradns-sdk-go v0.0.0-20200616202852-e62052662f60
|
||||
github.com/vinyldns/go-vinyldns v0.0.0-20190611170422-7119fe55ed92
|
||||
github.com/vultr/govultr v0.3.2
|
||||
github.com/vinyldns/go-vinyldns v0.0.0-20200211145900-fe8a3d82e556
|
||||
github.com/vultr/govultr v0.4.2
|
||||
go.etcd.io/etcd v0.5.0-alpha.5.0.20200401174654-e694b7bb0875
|
||||
golang.org/x/net v0.0.0-20190923162816-aa69164e4478
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45
|
||||
google.golang.org/api v0.9.0
|
||||
go.uber.org/ratelimit v0.1.0
|
||||
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e
|
||||
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d
|
||||
google.golang.org/api v0.15.0
|
||||
gopkg.in/ns1/ns1-go.v2 v2.0.0-20190322154155-0dafb5275fd1
|
||||
gopkg.in/yaml.v2 v2.2.5
|
||||
istio.io/api v0.0.0-20190820204432-483f2547d882
|
||||
istio.io/istio v0.0.0-20190322063008-2b1331886076
|
||||
k8s.io/api v0.0.0-20190620084959-7cf5895f2711
|
||||
k8s.io/apimachinery v0.0.0-20190612205821-1799e75a0719
|
||||
k8s.io/client-go v10.0.0+incompatible
|
||||
k8s.io/klog v0.3.1
|
||||
gopkg.in/yaml.v2 v2.2.8
|
||||
istio.io/api v0.0.0-20200529165953-72dad51d4ffc
|
||||
istio.io/client-go v0.0.0-20200529172309-31c16ea3f751
|
||||
k8s.io/api v0.18.3
|
||||
k8s.io/apimachinery v0.18.3
|
||||
k8s.io/client-go v0.18.3
|
||||
)
|
||||
|
||||
replace (
|
||||
@ -72,11 +82,7 @@ replace (
|
||||
github.com/Azure/go-autorest/autorest/adal => github.com/Azure/go-autorest/autorest/adal v0.6.0
|
||||
github.com/Azure/go-autorest/autorest/azure/auth => github.com/Azure/go-autorest/autorest/azure/auth v0.3.0
|
||||
github.com/golang/glog => github.com/kubermatic/glog-logrus v0.0.0-20180829085450-3fa5b9870d1d
|
||||
istio.io/api => istio.io/api v0.0.0-20190820204432-483f2547d882
|
||||
istio.io/istio => istio.io/istio v0.0.0-20190911205955-c2bd59595ce6
|
||||
k8s.io/api => k8s.io/api v0.0.0-20190817221950-ebce17126a01
|
||||
k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.0.0-20190919022157-e8460a76b3ad
|
||||
k8s.io/apimachinery => k8s.io/apimachinery v0.0.0-20190817221809-bf4de9df677c
|
||||
k8s.io/client-go => k8s.io/client-go v0.0.0-20190817222206-ee6c071a42cf
|
||||
// TODO(jpg): Pin gRPC to work around breaking change until all dependences are upgraded: https://github.com/etcd-io/etcd/issues/11563
|
||||
google.golang.org/grpc => google.golang.org/grpc v1.26.0
|
||||
k8s.io/klog => github.com/mikkeloscar/knolog v0.0.0-20190326191552-80742771eb6b
|
||||
)
|
||||
|
4
internal/config/config.go
Normal file
4
internal/config/config.go
Normal file
@ -0,0 +1,4 @@
|
||||
package config
|
||||
|
||||
// FastPoll used for fast testing
|
||||
var FastPoll = false
|
@ -39,7 +39,6 @@ func (b byAllFields) Less(i, j int) bool {
|
||||
return b[i].RecordType <= b[j].RecordType
|
||||
}
|
||||
return b[i].Targets.String() <= b[j].Targets.String()
|
||||
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
23
internal/testutils/init.go
Normal file
23
internal/testutils/init.go
Normal file
@ -0,0 +1,23 @@
|
||||
package testutils
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
|
||||
"log"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
"sigs.k8s.io/external-dns/internal/config"
|
||||
)
|
||||
|
||||
func init() {
|
||||
config.FastPoll = true
|
||||
if os.Getenv("DEBUG") == "" {
|
||||
logrus.SetOutput(ioutil.Discard)
|
||||
log.SetOutput(ioutil.Discard)
|
||||
} else {
|
||||
if level, err := logrus.ParseLevel(os.Getenv("DEBUG")); err == nil {
|
||||
logrus.SetLevel(level)
|
||||
}
|
||||
}
|
||||
}
|
@ -17,9 +17,11 @@ limitations under the License.
|
||||
package testutils
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/mock"
|
||||
|
||||
"sigs.k8s.io/external-dns/endpoint"
|
||||
)
|
||||
|
||||
@ -29,7 +31,7 @@ type MockSource struct {
|
||||
}
|
||||
|
||||
// Endpoints returns the desired mock endpoints.
|
||||
func (m *MockSource) Endpoints() ([]*endpoint.Endpoint, error) {
|
||||
func (m *MockSource) Endpoints(ctx context.Context) ([]*endpoint.Endpoint, error) {
|
||||
args := m.Called()
|
||||
|
||||
endpoints := args.Get(0)
|
||||
@ -40,21 +42,18 @@ func (m *MockSource) Endpoints() ([]*endpoint.Endpoint, error) {
|
||||
return endpoints.([]*endpoint.Endpoint), args.Error(1)
|
||||
}
|
||||
|
||||
// AddEventHandler adds an event handler function that's called when sources that support such a thing have changed.
|
||||
func (m *MockSource) AddEventHandler(handler func() error, stopChan <-chan struct{}, minInterval time.Duration) {
|
||||
// Execute callback handler no more than once per minInterval, until a message on stopChan is received.
|
||||
// AddEventHandler adds an event handler that should be triggered if something in source changes
|
||||
func (m *MockSource) AddEventHandler(ctx context.Context, handler func()) {
|
||||
go func() {
|
||||
var lastCallbackTime time.Time
|
||||
ticker := time.NewTicker(5 * time.Second)
|
||||
defer ticker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-stopChan:
|
||||
case <-ctx.Done():
|
||||
return
|
||||
default:
|
||||
now := time.Now()
|
||||
if now.After(lastCallbackTime.Add(minInterval)) {
|
||||
handler()
|
||||
lastCallbackTime = time.Now()
|
||||
}
|
||||
case <-ticker.C:
|
||||
handler()
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
20
kustomize/external-dns-clusterrole.yaml
Normal file
20
kustomize/external-dns-clusterrole.yaml
Normal file
@ -0,0 +1,20 @@
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: external-dns
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["services"]
|
||||
verbs: ["get","watch","list"]
|
||||
- apiGroups: [""]
|
||||
resources: ["pods"]
|
||||
verbs: ["get","watch","list"]
|
||||
- apiGroups: ["extensions"]
|
||||
resources: ["ingresses"]
|
||||
verbs: ["get","watch","list"]
|
||||
- apiGroups: [""]
|
||||
resources: ["nodes"]
|
||||
verbs: ["list"]
|
||||
- apiGroups: [""]
|
||||
resources: ["endpoints"]
|
||||
verbs: ["get","watch","list"]
|
12
kustomize/external-dns-clusterrolebinding.yaml
Normal file
12
kustomize/external-dns-clusterrolebinding.yaml
Normal file
@ -0,0 +1,12 @@
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: external-dns-viewer
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: external-dns
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: external-dns
|
||||
namespace: default
|
23
kustomize/external-dns-deployment.yaml
Normal file
23
kustomize/external-dns-deployment.yaml
Normal file
@ -0,0 +1,23 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: external-dns
|
||||
spec:
|
||||
strategy:
|
||||
type: Recreate
|
||||
selector:
|
||||
matchLabels:
|
||||
app: external-dns
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: external-dns
|
||||
spec:
|
||||
serviceAccountName: external-dns
|
||||
containers:
|
||||
- name: external-dns
|
||||
image: us.gcr.io/k8s-artifacts-prod/external-dns/external-dns:v0.7.2
|
||||
args:
|
||||
- --source=service
|
||||
- --source=ingress
|
||||
- --registry=txt
|
4
kustomize/external-dns-serviceaccount.yaml
Normal file
4
kustomize/external-dns-serviceaccount.yaml
Normal file
@ -0,0 +1,4 @@
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: external-dns
|
5
kustomize/kustomization.yaml
Normal file
5
kustomize/kustomization.yaml
Normal file
@ -0,0 +1,5 @@
|
||||
resources:
|
||||
- ./external-dns-deployment.yaml
|
||||
- ./external-dns-serviceaccount.yaml
|
||||
- ./external-dns-clusterrole.yaml
|
||||
- ./external-dns-clusterrolebinding.yaml
|
60
main.go
60
main.go
@ -28,6 +28,12 @@ import (
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
_ "k8s.io/client-go/plugin/pkg/client/auth"
|
||||
"sigs.k8s.io/external-dns/controller"
|
||||
"sigs.k8s.io/external-dns/endpoint"
|
||||
"sigs.k8s.io/external-dns/pkg/apis/externaldns"
|
||||
"sigs.k8s.io/external-dns/pkg/apis/externaldns/validation"
|
||||
"sigs.k8s.io/external-dns/plan"
|
||||
"sigs.k8s.io/external-dns/provider"
|
||||
"sigs.k8s.io/external-dns/provider/akamai"
|
||||
"sigs.k8s.io/external-dns/provider/alibabacloud"
|
||||
"sigs.k8s.io/external-dns/provider/aws"
|
||||
@ -41,6 +47,7 @@ import (
|
||||
"sigs.k8s.io/external-dns/provider/dyn"
|
||||
"sigs.k8s.io/external-dns/provider/exoscale"
|
||||
"sigs.k8s.io/external-dns/provider/google"
|
||||
"sigs.k8s.io/external-dns/provider/hetzner"
|
||||
"sigs.k8s.io/external-dns/provider/infoblox"
|
||||
"sigs.k8s.io/external-dns/provider/inmemory"
|
||||
"sigs.k8s.io/external-dns/provider/linode"
|
||||
@ -52,15 +59,9 @@ import (
|
||||
"sigs.k8s.io/external-dns/provider/rdns"
|
||||
"sigs.k8s.io/external-dns/provider/rfc2136"
|
||||
"sigs.k8s.io/external-dns/provider/transip"
|
||||
"sigs.k8s.io/external-dns/provider/ultradns"
|
||||
"sigs.k8s.io/external-dns/provider/vinyldns"
|
||||
"sigs.k8s.io/external-dns/provider/vultr"
|
||||
|
||||
"sigs.k8s.io/external-dns/controller"
|
||||
"sigs.k8s.io/external-dns/endpoint"
|
||||
"sigs.k8s.io/external-dns/pkg/apis/externaldns"
|
||||
"sigs.k8s.io/external-dns/pkg/apis/externaldns/validation"
|
||||
"sigs.k8s.io/external-dns/plan"
|
||||
"sigs.k8s.io/external-dns/provider"
|
||||
"sigs.k8s.io/external-dns/registry"
|
||||
"sigs.k8s.io/external-dns/source"
|
||||
)
|
||||
@ -89,12 +90,10 @@ func main() {
|
||||
}
|
||||
log.SetLevel(ll)
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
stopChan := make(chan struct{}, 1)
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
|
||||
go serveMetrics(cfg.MetricsAddress)
|
||||
go handleSigterm(stopChan)
|
||||
go handleSigterm(cancel)
|
||||
|
||||
// Create a source.Config from the flags passed by the user.
|
||||
sourceCfg := &source.Config{
|
||||
@ -111,9 +110,8 @@ func main() {
|
||||
CRDSourceAPIVersion: cfg.CRDSourceAPIVersion,
|
||||
CRDSourceKind: cfg.CRDSourceKind,
|
||||
KubeConfig: cfg.KubeConfig,
|
||||
KubeMaster: cfg.Master,
|
||||
APIServerURL: cfg.APIServerURL,
|
||||
ServiceTypeFilter: cfg.ServiceTypeFilter,
|
||||
IstioIngressGatewayServices: cfg.IstioIngressGatewayServices,
|
||||
CFAPIEndpoint: cfg.CFAPIEndpoint,
|
||||
CFUsername: cfg.CFUsername,
|
||||
CFPassword: cfg.CFPassword,
|
||||
@ -124,8 +122,8 @@ func main() {
|
||||
|
||||
// Lookup all the selected sources by names and pass them the desired configuration.
|
||||
sources, err := source.ByNames(&source.SingletonClientGenerator{
|
||||
KubeConfig: cfg.KubeConfig,
|
||||
KubeMaster: cfg.Master,
|
||||
KubeConfig: cfg.KubeConfig,
|
||||
APIServerURL: cfg.APIServerURL,
|
||||
// If update events are enabled, disable timeout.
|
||||
RequestTimeout: func() time.Duration {
|
||||
if cfg.UpdateEvents {
|
||||
@ -193,6 +191,8 @@ func main() {
|
||||
p, err = vinyldns.NewVinylDNSProvider(domainFilter, zoneIDFilter, cfg.DryRun)
|
||||
case "vultr":
|
||||
p, err = vultr.NewVultrProvider(domainFilter, cfg.DryRun)
|
||||
case "ultradns":
|
||||
p, err = ultradns.NewUltraDNSProvider(domainFilter, cfg.DryRun)
|
||||
case "cloudflare":
|
||||
p, err = cloudflare.NewCloudFlareProvider(domainFilter, zoneIDFilter, cfg.CloudflareZonesPerPage, cfg.CloudflareProxied, cfg.DryRun)
|
||||
case "rcodezero":
|
||||
@ -200,9 +200,11 @@ func main() {
|
||||
case "google":
|
||||
p, err = google.NewGoogleProvider(ctx, cfg.GoogleProject, domainFilter, zoneIDFilter, cfg.GoogleBatchChangeSize, cfg.GoogleBatchChangeInterval, cfg.DryRun)
|
||||
case "digitalocean":
|
||||
p, err = digitalocean.NewDigitalOceanProvider(ctx, domainFilter, cfg.DryRun)
|
||||
p, err = digitalocean.NewDigitalOceanProvider(ctx, domainFilter, cfg.DryRun, cfg.DigitalOceanAPIPageSize)
|
||||
case "hetzner":
|
||||
p, err = hetzner.NewHetznerProvider(ctx, domainFilter, cfg.DryRun)
|
||||
case "ovh":
|
||||
p, err = ovh.NewOVHProvider(ctx, domainFilter, cfg.OVHEndpoint, cfg.DryRun)
|
||||
p, err = ovh.NewOVHProvider(ctx, domainFilter, cfg.OVHEndpoint, cfg.OVHApiRateLimit, cfg.DryRun)
|
||||
case "linode":
|
||||
p, err = linode.NewLinodeProvider(domainFilter, cfg.DryRun, externaldns.Version)
|
||||
case "dnsimple":
|
||||
@ -300,7 +302,7 @@ func main() {
|
||||
case "noop":
|
||||
r, err = registry.NewNoopRegistry(p)
|
||||
case "txt":
|
||||
r, err = registry.NewTXTRegistry(p, cfg.TXTPrefix, cfg.TXTOwnerID, cfg.TXTCacheInterval)
|
||||
r, err = registry.NewTXTRegistry(p, cfg.TXTPrefix, cfg.TXTSuffix, cfg.TXTOwnerID, cfg.TXTCacheInterval)
|
||||
case "aws-sd":
|
||||
r, err = registry.NewAWSSDRegistry(p.(*awssd.AWSSDProvider), cfg.TXTOwnerID)
|
||||
default:
|
||||
@ -324,13 +326,6 @@ func main() {
|
||||
DomainFilter: domainFilter,
|
||||
}
|
||||
|
||||
if cfg.UpdateEvents {
|
||||
// Add RunOnce as the handler function that will be called when ingress/service sources have changed.
|
||||
// Note that k8s Informers will perform an initial list operation, which results in the handler
|
||||
// function initially being called for every Service/Ingress that exists limted by minInterval.
|
||||
ctrl.Source.AddEventHandler(func() error { return ctrl.RunOnce(ctx) }, stopChan, 1*time.Minute)
|
||||
}
|
||||
|
||||
if cfg.Once {
|
||||
err := ctrl.RunOnce(ctx)
|
||||
if err != nil {
|
||||
@ -339,15 +334,24 @@ func main() {
|
||||
|
||||
os.Exit(0)
|
||||
}
|
||||
ctrl.Run(ctx, stopChan)
|
||||
|
||||
if cfg.UpdateEvents {
|
||||
// Add RunOnce as the handler function that will be called when ingress/service sources have changed.
|
||||
// Note that k8s Informers will perform an initial list operation, which results in the handler
|
||||
// function initially being called for every Service/Ingress that exists
|
||||
ctrl.Source.AddEventHandler(ctx, func() { ctrl.ScheduleRunOnce(time.Now()) })
|
||||
}
|
||||
|
||||
ctrl.ScheduleRunOnce(time.Now())
|
||||
ctrl.Run(ctx)
|
||||
}
|
||||
|
||||
func handleSigterm(stopChan chan struct{}) {
|
||||
func handleSigterm(cancel func()) {
|
||||
signals := make(chan os.Signal, 1)
|
||||
signal.Notify(signals, syscall.SIGTERM)
|
||||
<-signals
|
||||
log.Info("Received SIGTERM. Terminating...")
|
||||
close(stopChan)
|
||||
cancel()
|
||||
}
|
||||
|
||||
func serveMetrics(address string) {
|
||||
|
@ -38,7 +38,7 @@ var (
|
||||
|
||||
// Config is a project-wide configuration
|
||||
type Config struct {
|
||||
Master string
|
||||
APIServerURL string
|
||||
KubeConfig string
|
||||
RequestTimeout time.Duration
|
||||
IstioIngressGatewayServices []string
|
||||
@ -99,6 +99,7 @@ type Config struct {
|
||||
OCIConfigFile string
|
||||
InMemoryZones []string
|
||||
OVHEndpoint string
|
||||
OVHApiRateLimit int
|
||||
PDNSServer string
|
||||
PDNSAPIKey string `secure:"yes"`
|
||||
PDNSTLSEnabled bool
|
||||
@ -109,6 +110,7 @@ type Config struct {
|
||||
Registry string
|
||||
TXTOwnerID string
|
||||
TXTPrefix string
|
||||
TXTSuffix string
|
||||
Interval time.Duration
|
||||
Once bool
|
||||
DryRun bool
|
||||
@ -140,13 +142,13 @@ type Config struct {
|
||||
NS1MinTTLSeconds int
|
||||
TransIPAccountName string
|
||||
TransIPPrivateKeyFile string
|
||||
DigitalOceanAPIPageSize int
|
||||
}
|
||||
|
||||
var defaultConfig = &Config{
|
||||
Master: "",
|
||||
APIServerURL: "",
|
||||
KubeConfig: "",
|
||||
RequestTimeout: time.Second * 30,
|
||||
IstioIngressGatewayServices: []string{"istio-system/istio-ingressgateway"},
|
||||
ContourLoadBalancerService: "heptio-contour/contour",
|
||||
SkipperRouteGroupVersion: "zalando.org/v1",
|
||||
Sources: nil,
|
||||
@ -196,6 +198,7 @@ var defaultConfig = &Config{
|
||||
OCIConfigFile: "/etc/kubernetes/oci.yaml",
|
||||
InMemoryZones: []string{},
|
||||
OVHEndpoint: "ovh-eu",
|
||||
OVHApiRateLimit: 20,
|
||||
PDNSServer: "http://localhost:8081",
|
||||
PDNSAPIKey: "",
|
||||
PDNSTLSEnabled: false,
|
||||
@ -206,6 +209,7 @@ var defaultConfig = &Config{
|
||||
Registry: "txt",
|
||||
TXTOwnerID: "default",
|
||||
TXTPrefix: "",
|
||||
TXTSuffix: "",
|
||||
TXTCacheInterval: 0,
|
||||
Interval: time.Minute,
|
||||
Once: false,
|
||||
@ -236,6 +240,7 @@ var defaultConfig = &Config{
|
||||
NS1IgnoreSSL: false,
|
||||
TransIPAccountName: "",
|
||||
TransIPPrivateKeyFile: "",
|
||||
DigitalOceanAPIPageSize: 50,
|
||||
}
|
||||
|
||||
// NewConfig returns new Config object
|
||||
@ -280,7 +285,7 @@ func (cfg *Config) ParseFlags(args []string) error {
|
||||
app.DefaultEnvars()
|
||||
|
||||
// Flags related to Kubernetes
|
||||
app.Flag("master", "The Kubernetes API server to connect to (default: auto-detect)").Default(defaultConfig.Master).StringVar(&cfg.Master)
|
||||
app.Flag("server", "The Kubernetes API server to connect to (default: auto-detect)").Default(defaultConfig.APIServerURL).StringVar(&cfg.APIServerURL)
|
||||
app.Flag("kubeconfig", "Retrieve target cluster configuration from a Kubernetes configuration file (default: auto-detect)").Default(defaultConfig.KubeConfig).StringVar(&cfg.KubeConfig)
|
||||
app.Flag("request-timeout", "Request timeout when calling Kubernetes APIs. 0s means no timeout").Default(defaultConfig.RequestTimeout.String()).DurationVar(&cfg.RequestTimeout)
|
||||
|
||||
@ -296,7 +301,7 @@ func (cfg *Config) ParseFlags(args []string) error {
|
||||
app.Flag("skipper-routegroup-groupversion", "The resource version for skipper routegroup").Default(source.DefaultRoutegroupVersion).StringVar(&cfg.SkipperRouteGroupVersion)
|
||||
|
||||
// Flags related to processing sources
|
||||
app.Flag("source", "The resource types that are queried for endpoints; specify multiple times for multiple sources (required, options: service, ingress, node, fake, connector, istio-gateway, cloudfoundry, contour-ingressroute, crd, empty, skipper-routegroup,openshift-route)").Required().PlaceHolder("source").EnumsVar(&cfg.Sources, "service", "ingress", "node", "istio-gateway", "cloudfoundry", "contour-ingressroute", "fake", "connector", "crd", "empty", "skipper-routegroup", "openshift-route")
|
||||
app.Flag("source", "The resource types that are queried for endpoints; specify multiple times for multiple sources (required, options: service, ingress, node, fake, connector, istio-gateway, istio-virtualservice, cloudfoundry, contour-ingressroute, crd, empty, skipper-routegroup,openshift-route)").Required().PlaceHolder("source").EnumsVar(&cfg.Sources, "service", "ingress", "node", "istio-gateway", "istio-virtualservice", "cloudfoundry", "contour-ingressroute", "fake", "connector", "crd", "empty", "skipper-routegroup", "openshift-route")
|
||||
|
||||
app.Flag("namespace", "Limit sources of endpoints to a specific namespace (default: all namespaces)").Default(defaultConfig.Namespace).StringVar(&cfg.Namespace)
|
||||
app.Flag("annotation-filter", "Filter sources managed by external-dns via annotation using label selector semantics (default: all sources)").Default(defaultConfig.AnnotationFilter).StringVar(&cfg.AnnotationFilter)
|
||||
@ -313,7 +318,7 @@ func (cfg *Config) ParseFlags(args []string) error {
|
||||
app.Flag("service-type-filter", "The service types to take care about (default: all, expected: ClusterIP, NodePort, LoadBalancer or ExternalName)").StringsVar(&cfg.ServiceTypeFilter)
|
||||
|
||||
// Flags related to providers
|
||||
app.Flag("provider", "The DNS provider where the DNS records will be created (required, options: aws, aws-sd, google, azure, azure-dns, azure-private-dns, cloudflare, rcodezero, digitalocean, dnsimple, akamai, infoblox, dyn, designate, coredns, skydns, inmemory, ovh, pdns, oci, exoscale, linode, rfc2136, ns1, transip, vinyldns, rdns, vultr)").Required().PlaceHolder("provider").EnumVar(&cfg.Provider, "aws", "aws-sd", "google", "azure", "azure-dns", "azure-private-dns", "alibabacloud", "cloudflare", "rcodezero", "digitalocean", "dnsimple", "akamai", "infoblox", "dyn", "designate", "coredns", "skydns", "inmemory", "ovh", "pdns", "oci", "exoscale", "linode", "rfc2136", "ns1", "transip", "vinyldns", "rdns", "vultr")
|
||||
app.Flag("provider", "The DNS provider where the DNS records will be created (required, options: aws, aws-sd, google, azure, azure-dns, azure-private-dns, cloudflare, rcodezero, digitalocean, hetzner, dnsimple, akamai, infoblox, dyn, designate, coredns, skydns, inmemory, ovh, pdns, oci, exoscale, linode, rfc2136, ns1, transip, vinyldns, rdns, vultr, ultradns)").Required().PlaceHolder("provider").EnumVar(&cfg.Provider, "aws", "aws-sd", "google", "azure", "azure-dns", "hetzner", "azure-private-dns", "alibabacloud", "cloudflare", "rcodezero", "digitalocean", "dnsimple", "akamai", "infoblox", "dyn", "designate", "coredns", "skydns", "inmemory", "ovh", "pdns", "oci", "exoscale", "linode", "rfc2136", "ns1", "transip", "vinyldns", "rdns", "vultr", "ultradns")
|
||||
app.Flag("domain-filter", "Limit possible target zones by a domain suffix; specify multiple times for multiple domains (optional)").Default("").StringsVar(&cfg.DomainFilter)
|
||||
app.Flag("exclude-domains", "Exclude subdomains (optional)").Default("").StringsVar(&cfg.ExcludeDomains)
|
||||
app.Flag("zone-id-filter", "Filter target zones by hosted zone id; specify multiple times for multiple zones (optional)").Default("").StringsVar(&cfg.ZoneIDFilter)
|
||||
@ -357,12 +362,14 @@ func (cfg *Config) ParseFlags(args []string) error {
|
||||
app.Flag("rcodezero-txt-encrypt", "When using the Rcodezero provider with txt registry option, set if TXT rrs are encrypted (default: false)").Default(strconv.FormatBool(defaultConfig.RcodezeroTXTEncrypt)).BoolVar(&cfg.RcodezeroTXTEncrypt)
|
||||
app.Flag("inmemory-zone", "Provide a list of pre-configured zones for the inmemory provider; specify multiple times for multiple zones (optional)").Default("").StringsVar(&cfg.InMemoryZones)
|
||||
app.Flag("ovh-endpoint", "When using the OVH provider, specify the endpoint (default: ovh-eu)").Default(defaultConfig.OVHEndpoint).StringVar(&cfg.OVHEndpoint)
|
||||
app.Flag("ovh-api-rate-limit", "When using the OVH provider, specify the API request rate limit, X operations by seconds (default: 20)").Default(strconv.Itoa(defaultConfig.OVHApiRateLimit)).IntVar(&cfg.OVHApiRateLimit)
|
||||
app.Flag("pdns-server", "When using the PowerDNS/PDNS provider, specify the URL to the pdns server (required when --provider=pdns)").Default(defaultConfig.PDNSServer).StringVar(&cfg.PDNSServer)
|
||||
app.Flag("pdns-api-key", "When using the PowerDNS/PDNS provider, specify the API key to use to authorize requests (required when --provider=pdns)").Default(defaultConfig.PDNSAPIKey).StringVar(&cfg.PDNSAPIKey)
|
||||
app.Flag("pdns-tls-enabled", "When using the PowerDNS/PDNS provider, specify whether to use TLS (default: false, requires --tls-ca, optionally specify --tls-client-cert and --tls-client-cert-key)").Default(strconv.FormatBool(defaultConfig.PDNSTLSEnabled)).BoolVar(&cfg.PDNSTLSEnabled)
|
||||
app.Flag("ns1-endpoint", "When using the NS1 provider, specify the URL of the API endpoint to target (default: https://api.nsone.net/v1/)").Default(defaultConfig.NS1Endpoint).StringVar(&cfg.NS1Endpoint)
|
||||
app.Flag("ns1-ignoressl", "When using the NS1 provider, specify whether to verify the SSL certificate (default: false)").Default(strconv.FormatBool(defaultConfig.NS1IgnoreSSL)).BoolVar(&cfg.NS1IgnoreSSL)
|
||||
app.Flag("ns1-min-ttl", "Minimal TTL (in seconds) for records. This value will be used if the provided TTL for a service/ingress is lower than this.").IntVar(&cfg.NS1MinTTLSeconds)
|
||||
app.Flag("digitalocean-api-page-size", "Configure the page size used when querying the DigitalOcean API.").Default(strconv.Itoa(defaultConfig.DigitalOceanAPIPageSize)).IntVar(&cfg.DigitalOceanAPIPageSize)
|
||||
|
||||
// Flags related to TLS communication
|
||||
app.Flag("tls-ca", "When using TLS communication, the path to the certificate authority to verify server communications (optionally specify --tls-client-cert for two-way TLS)").Default(defaultConfig.TLSCA).StringVar(&cfg.TLSCA)
|
||||
@ -394,7 +401,8 @@ func (cfg *Config) ParseFlags(args []string) error {
|
||||
// Flags related to the registry
|
||||
app.Flag("registry", "The registry implementation to use to keep track of DNS record ownership (default: txt, options: txt, noop, aws-sd)").Default(defaultConfig.Registry).EnumVar(&cfg.Registry, "txt", "noop", "aws-sd")
|
||||
app.Flag("txt-owner-id", "When using the TXT registry, a name that identifies this instance of ExternalDNS (default: default)").Default(defaultConfig.TXTOwnerID).StringVar(&cfg.TXTOwnerID)
|
||||
app.Flag("txt-prefix", "When using the TXT registry, a custom string that's prefixed to each ownership DNS record (optional)").Default(defaultConfig.TXTPrefix).StringVar(&cfg.TXTPrefix)
|
||||
app.Flag("txt-prefix", "When using the TXT registry, a custom string that's prefixed to each ownership DNS record (optional). Mutual exclusive with txt-suffix!").Default(defaultConfig.TXTPrefix).StringVar(&cfg.TXTPrefix)
|
||||
app.Flag("txt-suffix", "When using the TXT registry, a custom string that's suffixed to the host portion of each ownership DNS record (optional). Mutual exclusive with txt-prefix!").Default(defaultConfig.TXTSuffix).StringVar(&cfg.TXTSuffix)
|
||||
|
||||
// Flags related to the main control loop
|
||||
app.Flag("txt-cache-interval", "The interval between cache synchronizations in duration format (default: disabled)").Default(defaultConfig.TXTCacheInterval.String()).DurationVar(&cfg.TXTCacheInterval)
|
||||
|
@ -29,17 +29,17 @@ import (
|
||||
|
||||
var (
|
||||
minimalConfig = &Config{
|
||||
Master: "",
|
||||
KubeConfig: "",
|
||||
RequestTimeout: time.Second * 30,
|
||||
ContourLoadBalancerService: "heptio-contour/contour",
|
||||
SkipperRouteGroupVersion: "zalando.org/v1",
|
||||
Sources: []string{"service"},
|
||||
Namespace: "",
|
||||
FQDNTemplate: "",
|
||||
Compatibility: "",
|
||||
Provider: "google",
|
||||
GoogleProject: "",
|
||||
APIServerURL: "",
|
||||
KubeConfig: "",
|
||||
RequestTimeout: time.Second * 30,
|
||||
ContourLoadBalancerService: "heptio-contour/contour",
|
||||
SkipperRouteGroupVersion: "zalando.org/v1",
|
||||
Sources: []string{"service"},
|
||||
Namespace: "",
|
||||
FQDNTemplate: "",
|
||||
Compatibility: "",
|
||||
Provider: "google",
|
||||
GoogleProject: "",
|
||||
GoogleBatchChangeSize: 1000,
|
||||
GoogleBatchChangeInterval: time.Second,
|
||||
DomainFilter: []string{""},
|
||||
@ -75,6 +75,7 @@ var (
|
||||
OCIConfigFile: "/etc/kubernetes/oci.yaml",
|
||||
InMemoryZones: []string{""},
|
||||
OVHEndpoint: "ovh-eu",
|
||||
OVHApiRateLimit: 20,
|
||||
PDNSServer: "http://localhost:8081",
|
||||
PDNSAPIKey: "",
|
||||
Policy: "sync",
|
||||
@ -98,20 +99,21 @@ var (
|
||||
RcodezeroTXTEncrypt: false,
|
||||
TransIPAccountName: "",
|
||||
TransIPPrivateKeyFile: "",
|
||||
DigitalOceanAPIPageSize: 50,
|
||||
}
|
||||
|
||||
overriddenConfig = &Config{
|
||||
Master: "http://127.0.0.1:8080",
|
||||
KubeConfig: "/some/path",
|
||||
RequestTimeout: time.Second * 77,
|
||||
ContourLoadBalancerService: "heptio-contour-other/contour-other",
|
||||
SkipperRouteGroupVersion: "zalando.org/v2",
|
||||
Sources: []string{"service", "ingress", "connector"},
|
||||
Namespace: "namespace",
|
||||
IgnoreHostnameAnnotation: true,
|
||||
FQDNTemplate: "{{.Name}}.service.example.com",
|
||||
Compatibility: "mate",
|
||||
Provider: "google",
|
||||
APIServerURL: "http://127.0.0.1:8080",
|
||||
KubeConfig: "/some/path",
|
||||
RequestTimeout: time.Second * 77,
|
||||
ContourLoadBalancerService: "heptio-contour-other/contour-other",
|
||||
SkipperRouteGroupVersion: "zalando.org/v2",
|
||||
Sources: []string{"service", "ingress", "connector"},
|
||||
Namespace: "namespace",
|
||||
IgnoreHostnameAnnotation: true,
|
||||
FQDNTemplate: "{{.Name}}.service.example.com",
|
||||
Compatibility: "mate",
|
||||
Provider: "google",
|
||||
GoogleProject: "project",
|
||||
GoogleBatchChangeSize: 100,
|
||||
GoogleBatchChangeInterval: time.Second * 2,
|
||||
@ -148,6 +150,7 @@ var (
|
||||
OCIConfigFile: "oci.yaml",
|
||||
InMemoryZones: []string{"example.org", "company.com"},
|
||||
OVHEndpoint: "ovh-ca",
|
||||
OVHApiRateLimit: 42,
|
||||
PDNSServer: "http://ns.example.com:8081",
|
||||
PDNSAPIKey: "some-secret-key",
|
||||
PDNSTLSEnabled: true,
|
||||
@ -177,6 +180,7 @@ var (
|
||||
NS1IgnoreSSL: true,
|
||||
TransIPAccountName: "transip",
|
||||
TransIPPrivateKeyFile: "/path/to/transip.key",
|
||||
DigitalOceanAPIPageSize: 100,
|
||||
}
|
||||
)
|
||||
|
||||
@ -199,7 +203,7 @@ func TestParseFlags(t *testing.T) {
|
||||
{
|
||||
title: "override everything via flags",
|
||||
args: []string{
|
||||
"--master=http://127.0.0.1:8080",
|
||||
"--server=http://127.0.0.1:8080",
|
||||
"--kubeconfig=/some/path",
|
||||
"--request-timeout=77s",
|
||||
"--contour-load-balancer=heptio-contour-other/contour-other",
|
||||
@ -235,6 +239,7 @@ func TestParseFlags(t *testing.T) {
|
||||
"--inmemory-zone=example.org",
|
||||
"--inmemory-zone=company.com",
|
||||
"--ovh-endpoint=ovh-ca",
|
||||
"--ovh-api-rate-limit=42",
|
||||
"--pdns-server=http://ns.example.com:8081",
|
||||
"--pdns-api-key=some-secret-key",
|
||||
"--pdns-tls-enabled",
|
||||
@ -280,6 +285,7 @@ func TestParseFlags(t *testing.T) {
|
||||
"--ns1-ignoressl",
|
||||
"--transip-account=transip",
|
||||
"--transip-keyfile=/path/to/transip.key",
|
||||
"--digitalocean-api-page-size=100",
|
||||
},
|
||||
envVars: map[string]string{},
|
||||
expected: overriddenConfig,
|
||||
@ -288,7 +294,7 @@ func TestParseFlags(t *testing.T) {
|
||||
title: "override everything via environment variables",
|
||||
args: []string{},
|
||||
envVars: map[string]string{
|
||||
"EXTERNAL_DNS_MASTER": "http://127.0.0.1:8080",
|
||||
"EXTERNAL_DNS_SERVER": "http://127.0.0.1:8080",
|
||||
"EXTERNAL_DNS_KUBECONFIG": "/some/path",
|
||||
"EXTERNAL_DNS_REQUEST_TIMEOUT": "77s",
|
||||
"EXTERNAL_DNS_CONTOUR_LOAD_BALANCER": "heptio-contour-other/contour-other",
|
||||
@ -323,6 +329,7 @@ func TestParseFlags(t *testing.T) {
|
||||
"EXTERNAL_DNS_OCI_CONFIG_FILE": "oci.yaml",
|
||||
"EXTERNAL_DNS_INMEMORY_ZONE": "example.org\ncompany.com",
|
||||
"EXTERNAL_DNS_OVH_ENDPOINT": "ovh-ca",
|
||||
"EXTERNAL_DNS_OVH_API_RATE_LIMIT": "42",
|
||||
"EXTERNAL_DNS_DOMAIN_FILTER": "example.org\ncompany.com",
|
||||
"EXTERNAL_DNS_EXCLUDE_DOMAINS": "xapi.example.org\nxapi.company.com",
|
||||
"EXTERNAL_DNS_PDNS_SERVER": "http://ns.example.com:8081",
|
||||
@ -364,6 +371,7 @@ func TestParseFlags(t *testing.T) {
|
||||
"EXTERNAL_DNS_NS1_IGNORESSL": "1",
|
||||
"EXTERNAL_DNS_TRANSIP_ACCOUNT": "transip",
|
||||
"EXTERNAL_DNS_TRANSIP_KEYFILE": "/path/to/transip.key",
|
||||
"EXTERNAL_DNS_DIGITALOCEAN_API_PAGE_SIZE": "100",
|
||||
},
|
||||
expected: overriddenConfig,
|
||||
},
|
||||
|
@ -91,5 +91,10 @@ func ValidateConfig(cfg *externaldns.Config) error {
|
||||
if cfg.IgnoreHostnameAnnotation && cfg.FQDNTemplate == "" {
|
||||
return errors.New("FQDN Template must be set if ignoring annotations")
|
||||
}
|
||||
|
||||
if len(cfg.TXTPrefix) > 0 && len(cfg.TXTSuffix) > 0 {
|
||||
return errors.New("txt-prefix and txt-suffix are mutual exclusive")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
@ -1,238 +0,0 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package async
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"k8s.io/client-go/util/flowcontrol"
|
||||
"k8s.io/klog"
|
||||
)
|
||||
|
||||
// BoundedFrequencyRunner manages runs of a user-provided function.
|
||||
// See NewBoundedFrequencyRunner for examples.
|
||||
type BoundedFrequencyRunner struct {
|
||||
name string // the name of this instance
|
||||
minInterval time.Duration // the min time between runs, modulo bursts
|
||||
maxInterval time.Duration // the max time between runs
|
||||
|
||||
run chan struct{} // try an async run
|
||||
|
||||
mu sync.Mutex // guards runs of fn and all mutations
|
||||
fn func() // function to run
|
||||
lastRun time.Time // time of last run
|
||||
timer timer // timer for deferred runs
|
||||
limiter rateLimiter // rate limiter for on-demand runs
|
||||
}
|
||||
|
||||
// designed so that flowcontrol.RateLimiter satisfies
|
||||
type rateLimiter interface {
|
||||
TryAccept() bool
|
||||
Stop()
|
||||
}
|
||||
|
||||
type nullLimiter struct{}
|
||||
|
||||
func (nullLimiter) TryAccept() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (nullLimiter) Stop() {}
|
||||
|
||||
var _ rateLimiter = nullLimiter{}
|
||||
|
||||
// for testing
|
||||
type timer interface {
|
||||
// C returns the timer's selectable channel.
|
||||
C() <-chan time.Time
|
||||
|
||||
// See time.Timer.Reset.
|
||||
Reset(d time.Duration) bool
|
||||
|
||||
// See time.Timer.Stop.
|
||||
Stop() bool
|
||||
|
||||
// See time.Now.
|
||||
Now() time.Time
|
||||
|
||||
// See time.Since.
|
||||
Since(t time.Time) time.Duration
|
||||
|
||||
// See time.Sleep.
|
||||
Sleep(d time.Duration)
|
||||
}
|
||||
|
||||
// implement our timer in terms of std time.Timer.
|
||||
type realTimer struct {
|
||||
*time.Timer
|
||||
}
|
||||
|
||||
func (rt realTimer) C() <-chan time.Time {
|
||||
return rt.Timer.C
|
||||
}
|
||||
|
||||
func (rt realTimer) Now() time.Time {
|
||||
return time.Now()
|
||||
}
|
||||
|
||||
func (rt realTimer) Since(t time.Time) time.Duration {
|
||||
return time.Since(t)
|
||||
}
|
||||
|
||||
func (rt realTimer) Sleep(d time.Duration) {
|
||||
time.Sleep(d)
|
||||
}
|
||||
|
||||
var _ timer = realTimer{}
|
||||
|
||||
// NewBoundedFrequencyRunner creates a new BoundedFrequencyRunner instance,
|
||||
// which will manage runs of the specified function.
|
||||
//
|
||||
// All runs will be async to the caller of BoundedFrequencyRunner.Run, but
|
||||
// multiple runs are serialized. If the function needs to hold locks, it must
|
||||
// take them internally.
|
||||
//
|
||||
// Runs of the function will have at least minInterval between them (from
|
||||
// completion to next start), except that up to bursts may be allowed. Burst
|
||||
// runs are "accumulated" over time, one per minInterval up to burstRuns total.
|
||||
// This can be used, for example, to mitigate the impact of expensive operations
|
||||
// being called in response to user-initiated operations. Run requests that
|
||||
// would violate the minInterval are coallesced and run at the next opportunity.
|
||||
//
|
||||
// The function will be run at least once per maxInterval. For example, this can
|
||||
// force periodic refreshes of state in the absence of anyone calling Run.
|
||||
//
|
||||
// Examples:
|
||||
//
|
||||
// NewBoundedFrequencyRunner("name", fn, time.Second, 5*time.Second, 1)
|
||||
// - fn will have at least 1 second between runs
|
||||
// - fn will have no more than 5 seconds between runs
|
||||
//
|
||||
// NewBoundedFrequencyRunner("name", fn, 3*time.Second, 10*time.Second, 3)
|
||||
// - fn will have at least 3 seconds between runs, with up to 3 burst runs
|
||||
// - fn will have no more than 10 seconds between runs
|
||||
//
|
||||
// The maxInterval must be greater than or equal to the minInterval, If the
|
||||
// caller passes a maxInterval less than minInterval, this function will panic.
|
||||
func NewBoundedFrequencyRunner(name string, fn func(), minInterval, maxInterval time.Duration, burstRuns int) *BoundedFrequencyRunner {
|
||||
timer := realTimer{Timer: time.NewTimer(0)} // will tick immediately
|
||||
<-timer.C() // consume the first tick
|
||||
return construct(name, fn, minInterval, maxInterval, burstRuns, timer)
|
||||
}
|
||||
|
||||
// Make an instance with dependencies injected.
|
||||
func construct(name string, fn func(), minInterval, maxInterval time.Duration, burstRuns int, timer timer) *BoundedFrequencyRunner {
|
||||
if maxInterval < minInterval {
|
||||
panic(fmt.Sprintf("%s: maxInterval (%v) must be >= minInterval (%v)", name, minInterval, maxInterval))
|
||||
}
|
||||
if timer == nil {
|
||||
panic(fmt.Sprintf("%s: timer must be non-nil", name))
|
||||
}
|
||||
|
||||
bfr := &BoundedFrequencyRunner{
|
||||
name: name,
|
||||
fn: fn,
|
||||
minInterval: minInterval,
|
||||
maxInterval: maxInterval,
|
||||
run: make(chan struct{}, 1),
|
||||
timer: timer,
|
||||
}
|
||||
if minInterval == 0 {
|
||||
bfr.limiter = nullLimiter{}
|
||||
} else {
|
||||
// allow burst updates in short succession
|
||||
qps := float32(time.Second) / float32(minInterval)
|
||||
bfr.limiter = flowcontrol.NewTokenBucketRateLimiterWithClock(qps, burstRuns, timer)
|
||||
}
|
||||
return bfr
|
||||
}
|
||||
|
||||
// Loop handles the periodic timer and run requests. This is expected to be
|
||||
// called as a goroutine.
|
||||
func (bfr *BoundedFrequencyRunner) Loop(stop <-chan struct{}) {
|
||||
klog.V(3).Infof("%s Loop running", bfr.name)
|
||||
bfr.timer.Reset(bfr.maxInterval)
|
||||
for {
|
||||
select {
|
||||
case <-stop:
|
||||
bfr.stop()
|
||||
klog.V(3).Infof("%s Loop stopping", bfr.name)
|
||||
return
|
||||
case <-bfr.timer.C():
|
||||
bfr.tryRun()
|
||||
case <-bfr.run:
|
||||
bfr.tryRun()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Run the function as soon as possible. If this is called while Loop is not
|
||||
// running, the call may be deferred indefinitely.
|
||||
// If there is already a queued request to call the underlying function, it
|
||||
// may be dropped - it is just guaranteed that we will try calling the
|
||||
// underlying function as soon as possible starting from now.
|
||||
func (bfr *BoundedFrequencyRunner) Run() {
|
||||
// If it takes a lot of time to run the underlying function, noone is really
|
||||
// processing elements from <run> channel. So to avoid blocking here on the
|
||||
// putting element to it, we simply skip it if there is already an element
|
||||
// in it.
|
||||
select {
|
||||
case bfr.run <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
// assumes the lock is not held
|
||||
func (bfr *BoundedFrequencyRunner) stop() {
|
||||
bfr.mu.Lock()
|
||||
defer bfr.mu.Unlock()
|
||||
bfr.limiter.Stop()
|
||||
bfr.timer.Stop()
|
||||
}
|
||||
|
||||
// assumes the lock is not held
|
||||
func (bfr *BoundedFrequencyRunner) tryRun() {
|
||||
bfr.mu.Lock()
|
||||
defer bfr.mu.Unlock()
|
||||
|
||||
if bfr.limiter.TryAccept() {
|
||||
// We're allowed to run the function right now.
|
||||
bfr.fn()
|
||||
bfr.lastRun = bfr.timer.Now()
|
||||
bfr.timer.Stop()
|
||||
bfr.timer.Reset(bfr.maxInterval)
|
||||
klog.V(3).Infof("%s: ran, next possible in %v, periodic in %v", bfr.name, bfr.minInterval, bfr.maxInterval)
|
||||
return
|
||||
}
|
||||
|
||||
// It can't run right now, figure out when it can run next.
|
||||
|
||||
elapsed := bfr.timer.Since(bfr.lastRun) // how long since last run
|
||||
nextPossible := bfr.minInterval - elapsed // time to next possible run
|
||||
nextScheduled := bfr.maxInterval - elapsed // time to next periodic run
|
||||
klog.V(4).Infof("%s: %v since last run, possible in %v, scheduled in %v", bfr.name, elapsed, nextPossible, nextScheduled)
|
||||
|
||||
if nextPossible < nextScheduled {
|
||||
// Set the timer for ASAP, but don't drain here. Assuming Loop is running,
|
||||
// it might get a delivery in the mean time, but that is OK.
|
||||
bfr.timer.Stop()
|
||||
bfr.timer.Reset(nextPossible)
|
||||
klog.V(3).Infof("%s: throttled, scheduling run in %v", bfr.name, nextPossible)
|
||||
}
|
||||
}
|
94
plan/plan.go
94
plan/plan.go
@ -18,11 +18,15 @@ package plan
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"sigs.k8s.io/external-dns/endpoint"
|
||||
)
|
||||
|
||||
// PropertyComparator is used in Plan for comparing the previous and current custom annotations.
|
||||
type PropertyComparator func(name string, previous string, current string) bool
|
||||
|
||||
// Plan can convert a list of desired and current records to a series of create,
|
||||
// update and delete actions.
|
||||
type Plan struct {
|
||||
@ -37,6 +41,8 @@ type Plan struct {
|
||||
Changes *Changes
|
||||
// DomainFilter matches DNS names
|
||||
DomainFilter endpoint.DomainFilter
|
||||
// Property comparator compares custom properties of providers
|
||||
PropertyComparator PropertyComparator
|
||||
}
|
||||
|
||||
// Changes holds lists of actions to be executed by dns providers
|
||||
@ -135,7 +141,7 @@ func (p *Plan) Calculate() *Plan {
|
||||
if row.current != nil && len(row.candidates) > 0 { //dns name is taken
|
||||
update := t.resolver.ResolveUpdate(row.current, row.candidates)
|
||||
// compare "update" to "current" to figure out if actual update is required
|
||||
if shouldUpdateTTL(update, row.current) || targetChanged(update, row.current) || shouldUpdateProviderSpecific(update, row.current) {
|
||||
if shouldUpdateTTL(update, row.current) || targetChanged(update, row.current) || p.shouldUpdateProviderSpecific(update, row.current) {
|
||||
inheritOwner(row.current, update)
|
||||
changes.UpdateNew = append(changes.UpdateNew, update)
|
||||
changes.UpdateOld = append(changes.UpdateOld, row.current)
|
||||
@ -178,45 +184,40 @@ func shouldUpdateTTL(desired, current *endpoint.Endpoint) bool {
|
||||
return desired.RecordTTL != current.RecordTTL
|
||||
}
|
||||
|
||||
func shouldUpdateProviderSpecific(desired, current *endpoint.Endpoint) bool {
|
||||
if current.ProviderSpecific == nil && len(desired.ProviderSpecific) == 0 {
|
||||
return false
|
||||
}
|
||||
for _, c := range current.ProviderSpecific {
|
||||
// don't consider target health when detecting changes
|
||||
// see: https://github.com/kubernetes-sigs/external-dns/issues/869#issuecomment-458576954
|
||||
if c.Name == "aws/evaluate-target-health" {
|
||||
continue
|
||||
}
|
||||
func (p *Plan) shouldUpdateProviderSpecific(desired, current *endpoint.Endpoint) bool {
|
||||
desiredProperties := map[string]endpoint.ProviderSpecificProperty{}
|
||||
|
||||
found := false
|
||||
if desired.ProviderSpecific != nil {
|
||||
for _, d := range desired.ProviderSpecific {
|
||||
if d.Name == c.Name {
|
||||
if d.Value != c.Value {
|
||||
// provider-specific attribute updated
|
||||
desiredProperties[d.Name] = d
|
||||
}
|
||||
}
|
||||
if current.ProviderSpecific != nil {
|
||||
for _, c := range current.ProviderSpecific {
|
||||
// don't consider target health when detecting changes
|
||||
// see: https://github.com/kubernetes-sigs/external-dns/issues/869#issuecomment-458576954
|
||||
if c.Name == "aws/evaluate-target-health" {
|
||||
continue
|
||||
}
|
||||
|
||||
if d, ok := desiredProperties[c.Name]; ok {
|
||||
if p.PropertyComparator != nil {
|
||||
if !p.PropertyComparator(c.Name, c.Value, d.Value) {
|
||||
return true
|
||||
}
|
||||
} else if c.Value != d.Value {
|
||||
return true
|
||||
}
|
||||
} else {
|
||||
if p.PropertyComparator != nil {
|
||||
if !p.PropertyComparator(c.Name, c.Value, "") {
|
||||
return true
|
||||
}
|
||||
} else if c.Value != "" {
|
||||
return true
|
||||
}
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
// provider-specific attribute deleted
|
||||
return true
|
||||
}
|
||||
}
|
||||
for _, d := range desired.ProviderSpecific {
|
||||
found := false
|
||||
for _, c := range current.ProviderSpecific {
|
||||
if d.Name == c.Name {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
// provider-specific attribute added
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
@ -260,3 +261,28 @@ func normalizeDNSName(dnsName string) string {
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// CompareBoolean is an implementation of PropertyComparator for comparing boolean-like values
|
||||
// For example external-dns.alpha.kubernetes.io/cloudflare-proxied: "true"
|
||||
// If value doesn't parse as boolean, the defaultValue is used
|
||||
func CompareBoolean(defaultValue bool, name, current, previous string) bool {
|
||||
var err error
|
||||
|
||||
v1, v2 := defaultValue, defaultValue
|
||||
|
||||
if previous != "" {
|
||||
v1, err = strconv.ParseBool(previous)
|
||||
if err != nil {
|
||||
v1 = defaultValue
|
||||
}
|
||||
}
|
||||
|
||||
if current != "" {
|
||||
v2, err = strconv.ParseBool(current)
|
||||
if err != nil {
|
||||
v2 = defaultValue
|
||||
}
|
||||
}
|
||||
|
||||
return v1 == v2
|
||||
}
|
||||
|
@ -38,6 +38,7 @@ type PlanTestSuite struct {
|
||||
bar127AWithTTL *endpoint.Endpoint
|
||||
bar127AWithProviderSpecificTrue *endpoint.Endpoint
|
||||
bar127AWithProviderSpecificFalse *endpoint.Endpoint
|
||||
bar127AWithProviderSpecificUnset *endpoint.Endpoint
|
||||
bar192A *endpoint.Endpoint
|
||||
multiple1 *endpoint.Endpoint
|
||||
multiple2 *endpoint.Endpoint
|
||||
@ -138,6 +139,15 @@ func (suite *PlanTestSuite) SetupTest() {
|
||||
},
|
||||
},
|
||||
}
|
||||
suite.bar127AWithProviderSpecificUnset = &endpoint.Endpoint{
|
||||
DNSName: "bar",
|
||||
Targets: endpoint.Targets{"127.0.0.1"},
|
||||
RecordType: "A",
|
||||
Labels: map[string]string{
|
||||
endpoint.ResourceLabelKey: "ingress/default/bar-127",
|
||||
},
|
||||
ProviderSpecific: endpoint.ProviderSpecific{},
|
||||
}
|
||||
suite.bar192A = &endpoint.Endpoint{
|
||||
DNSName: "bar",
|
||||
Targets: endpoint.Targets{"192.168.0.1"},
|
||||
@ -291,6 +301,54 @@ func (suite *PlanTestSuite) TestSyncSecondRoundWithProviderSpecificChange() {
|
||||
validateEntries(suite.T(), changes.Delete, expectedDelete)
|
||||
}
|
||||
|
||||
func (suite *PlanTestSuite) TestSyncSecondRoundWithProviderSpecificDefaultFalse() {
|
||||
current := []*endpoint.Endpoint{suite.bar127AWithProviderSpecificFalse}
|
||||
desired := []*endpoint.Endpoint{suite.bar127AWithProviderSpecificUnset}
|
||||
expectedCreate := []*endpoint.Endpoint{}
|
||||
expectedUpdateOld := []*endpoint.Endpoint{}
|
||||
expectedUpdateNew := []*endpoint.Endpoint{}
|
||||
expectedDelete := []*endpoint.Endpoint{}
|
||||
|
||||
p := &Plan{
|
||||
Policies: []Policy{&SyncPolicy{}},
|
||||
Current: current,
|
||||
Desired: desired,
|
||||
PropertyComparator: func(name, previous, current string) bool {
|
||||
return CompareBoolean(false, name, previous, current)
|
||||
},
|
||||
}
|
||||
|
||||
changes := p.Calculate().Changes
|
||||
validateEntries(suite.T(), changes.Create, expectedCreate)
|
||||
validateEntries(suite.T(), changes.UpdateNew, expectedUpdateNew)
|
||||
validateEntries(suite.T(), changes.UpdateOld, expectedUpdateOld)
|
||||
validateEntries(suite.T(), changes.Delete, expectedDelete)
|
||||
}
|
||||
|
||||
func (suite *PlanTestSuite) TestSyncSecondRoundWithProviderSpecificDefualtTrue() {
|
||||
current := []*endpoint.Endpoint{suite.bar127AWithProviderSpecificTrue}
|
||||
desired := []*endpoint.Endpoint{suite.bar127AWithProviderSpecificUnset}
|
||||
expectedCreate := []*endpoint.Endpoint{}
|
||||
expectedUpdateOld := []*endpoint.Endpoint{}
|
||||
expectedUpdateNew := []*endpoint.Endpoint{}
|
||||
expectedDelete := []*endpoint.Endpoint{}
|
||||
|
||||
p := &Plan{
|
||||
Policies: []Policy{&SyncPolicy{}},
|
||||
Current: current,
|
||||
Desired: desired,
|
||||
PropertyComparator: func(name, previous, current string) bool {
|
||||
return CompareBoolean(true, name, previous, current)
|
||||
},
|
||||
}
|
||||
|
||||
changes := p.Calculate().Changes
|
||||
validateEntries(suite.T(), changes.Create, expectedCreate)
|
||||
validateEntries(suite.T(), changes.UpdateNew, expectedUpdateNew)
|
||||
validateEntries(suite.T(), changes.UpdateOld, expectedUpdateOld)
|
||||
validateEntries(suite.T(), changes.Delete, expectedDelete)
|
||||
}
|
||||
|
||||
func (suite *PlanTestSuite) TestSyncSecondRoundWithOwnerInherited() {
|
||||
current := []*endpoint.Endpoint{suite.fooV1Cname}
|
||||
desired := []*endpoint.Endpoint{suite.fooV2Cname}
|
||||
|
@ -61,6 +61,7 @@ type AkamaiConfig struct {
|
||||
|
||||
// AkamaiProvider implements the DNS provider for Akamai.
|
||||
type AkamaiProvider struct {
|
||||
provider.BaseProvider
|
||||
domainFilter endpoint.DomainFilter
|
||||
zoneIDFilter provider.ZoneIDFilter
|
||||
config edgegrid.Config
|
||||
@ -292,7 +293,6 @@ func (p *AkamaiProvider) newAkamaiRecord(dnsName, recordType string, targets ...
|
||||
|
||||
func (p *AkamaiProvider) createRecords(zoneNameIDMapper provider.ZoneIDName, endpoints []*endpoint.Endpoint) (created []*endpoint.Endpoint, failed []*endpoint.Endpoint) {
|
||||
for _, endpoint := range endpoints {
|
||||
|
||||
if !p.domainFilter.Match(endpoint.DNSName) {
|
||||
log.Debugf("Skipping creation at Akamai of endpoint DNSName: '%s' RecordType: '%s', it does not match against Domain filters", endpoint.DNSName, endpoint.RecordType)
|
||||
continue
|
||||
@ -323,7 +323,6 @@ func (p *AkamaiProvider) createRecords(zoneNameIDMapper provider.ZoneIDName, end
|
||||
|
||||
func (p *AkamaiProvider) deleteRecords(zoneNameIDMapper provider.ZoneIDName, endpoints []*endpoint.Endpoint) (deleted []*endpoint.Endpoint, failed []*endpoint.Endpoint) {
|
||||
for _, endpoint := range endpoints {
|
||||
|
||||
if !p.domainFilter.Match(endpoint.DNSName) {
|
||||
log.Debugf("Skipping deletion at Akamai of endpoint: '%s' type: '%s', it does not match against Domain filters", endpoint.DNSName, endpoint.RecordType)
|
||||
continue
|
||||
@ -352,7 +351,6 @@ func (p *AkamaiProvider) deleteRecords(zoneNameIDMapper provider.ZoneIDName, end
|
||||
|
||||
func (p *AkamaiProvider) updateNewRecords(zoneNameIDMapper provider.ZoneIDName, endpoints []*endpoint.Endpoint) (updated []*endpoint.Endpoint, failed []*endpoint.Endpoint) {
|
||||
for _, endpoint := range endpoints {
|
||||
|
||||
if !p.domainFilter.Match(endpoint.DNSName) {
|
||||
log.Debugf("Skipping update at Akamai of endpoint DNSName: '%s' RecordType: '%s', it does not match against Domain filters", endpoint.DNSName, endpoint.RecordType)
|
||||
continue
|
||||
|
@ -29,7 +29,7 @@ import (
|
||||
"github.com/aliyun/alibaba-cloud-sdk-go/services/pvtz"
|
||||
"github.com/denverdino/aliyungo/metadata"
|
||||
log "github.com/sirupsen/logrus"
|
||||
yaml "gopkg.in/yaml.v2"
|
||||
"gopkg.in/yaml.v2"
|
||||
|
||||
"sigs.k8s.io/external-dns/endpoint"
|
||||
"sigs.k8s.io/external-dns/plan"
|
||||
@ -67,6 +67,7 @@ type AlibabaCloudPrivateZoneAPI interface {
|
||||
|
||||
// AlibabaCloudProvider implements the DNS provider for Alibaba Cloud.
|
||||
type AlibabaCloudProvider struct {
|
||||
provider.BaseProvider
|
||||
domainFilter endpoint.DomainFilter
|
||||
zoneIDFilter provider.ZoneIDFilter // Private Zone only
|
||||
MaxChangeCount int
|
||||
@ -99,17 +100,17 @@ func NewAlibabaCloudProvider(configFile string, domainFilter endpoint.DomainFilt
|
||||
if configFile != "" {
|
||||
contents, err := ioutil.ReadFile(configFile)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to read Alibaba Cloud config file '%s': %v", configFile, err)
|
||||
return nil, fmt.Errorf("failed to read Alibaba Cloud config file '%s': %v", configFile, err)
|
||||
}
|
||||
err = yaml.Unmarshal(contents, &cfg)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to parse Alibaba Cloud config file '%s': %v", configFile, err)
|
||||
return nil, fmt.Errorf("failed to parse Alibaba Cloud config file '%s': %v", configFile, err)
|
||||
}
|
||||
} else {
|
||||
var tmpError error
|
||||
cfg, tmpError = getCloudConfigFromStsToken()
|
||||
if tmpError != nil {
|
||||
return nil, fmt.Errorf("Failed to getCloudConfigFromStsToken: %v", tmpError)
|
||||
return nil, fmt.Errorf("failed to getCloudConfigFromStsToken: %v", tmpError)
|
||||
}
|
||||
}
|
||||
|
||||
@ -181,19 +182,19 @@ func getCloudConfigFromStsToken() (alibabaCloudConfig, error) {
|
||||
roleName := ""
|
||||
var err error
|
||||
if roleName, err = m.RoleName(); err != nil {
|
||||
return cfg, fmt.Errorf("Failed to get role name from Metadata Service: %v", err)
|
||||
return cfg, fmt.Errorf("failed to get role name from Metadata Service: %v", err)
|
||||
}
|
||||
vpcID, err := m.VpcID()
|
||||
if err != nil {
|
||||
return cfg, fmt.Errorf("Failed to get VPC ID from Metadata Service: %v", err)
|
||||
return cfg, fmt.Errorf("failed to get VPC ID from Metadata Service: %v", err)
|
||||
}
|
||||
regionID, err := m.Region()
|
||||
if err != nil {
|
||||
return cfg, fmt.Errorf("Failed to get Region ID from Metadata Service: %v", err)
|
||||
return cfg, fmt.Errorf("failed to get Region ID from Metadata Service: %v", err)
|
||||
}
|
||||
role, err := m.RamRoleToken(roleName)
|
||||
if err != nil {
|
||||
return cfg, fmt.Errorf("Failed to get STS Token from Metadata Service: %v", err)
|
||||
return cfg, fmt.Errorf("failed to get STS Token from Metadata Service: %v", err)
|
||||
}
|
||||
cfg.RegionID = regionID
|
||||
cfg.RoleName = roleName
|
||||
@ -316,7 +317,6 @@ func (p *AlibabaCloudProvider) getDNSName(rr, domain string) string {
|
||||
//
|
||||
// Returns the current records or an error if the operation failed.
|
||||
func (p *AlibabaCloudProvider) recordsForDNS() (endpoints []*endpoint.Endpoint, _ error) {
|
||||
|
||||
records, err := p.records()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -344,8 +344,7 @@ func (p *AlibabaCloudProvider) recordsForDNS() (endpoints []*endpoint.Endpoint,
|
||||
return endpoints, nil
|
||||
}
|
||||
|
||||
func getNextPageNumber(pageNumber, pageSize, totalCount int) int {
|
||||
|
||||
func getNextPageNumber(pageNumber, pageSize, totalCount int64) int64 {
|
||||
if pageNumber*pageSize >= totalCount {
|
||||
return 0
|
||||
}
|
||||
@ -364,18 +363,13 @@ func (p *AlibabaCloudProvider) getRecordKeyByEndpoint(endpoint *endpoint.Endpoin
|
||||
}
|
||||
|
||||
func (p *AlibabaCloudProvider) groupRecords(records []alidns.Record) (endpointMap map[string][]alidns.Record) {
|
||||
|
||||
endpointMap = make(map[string][]alidns.Record)
|
||||
|
||||
for _, record := range records {
|
||||
|
||||
key := p.getRecordKey(record)
|
||||
|
||||
recordList := endpointMap[key]
|
||||
endpointMap[key] = append(recordList, record)
|
||||
|
||||
}
|
||||
|
||||
return endpointMap
|
||||
}
|
||||
|
||||
@ -429,7 +423,7 @@ func (p *AlibabaCloudProvider) getDomainList() ([]string, error) {
|
||||
if nextPage == 0 {
|
||||
break
|
||||
} else {
|
||||
request.PageNumber = requests.NewInteger(nextPage)
|
||||
request.PageNumber = requests.NewInteger64(nextPage)
|
||||
}
|
||||
}
|
||||
return domainNames, nil
|
||||
@ -450,18 +444,15 @@ func (p *AlibabaCloudProvider) getDomainRecords(domainName string) ([]alidns.Rec
|
||||
}
|
||||
|
||||
for _, record := range response.DomainRecords.Record {
|
||||
|
||||
domainName := record.DomainName
|
||||
recordType := record.Type
|
||||
|
||||
if !p.domainFilter.Match(domainName) {
|
||||
continue
|
||||
}
|
||||
|
||||
if !provider.SupportedRecordType(recordType) {
|
||||
continue
|
||||
}
|
||||
|
||||
//TODO filter Locked record
|
||||
results = append(results, record)
|
||||
}
|
||||
@ -469,7 +460,7 @@ func (p *AlibabaCloudProvider) getDomainRecords(domainName string) ([]alidns.Rec
|
||||
if nextPage == 0 {
|
||||
break
|
||||
} else {
|
||||
request.PageNumber = requests.NewInteger(nextPage)
|
||||
request.PageNumber = requests.NewInteger64(nextPage)
|
||||
}
|
||||
}
|
||||
|
||||
@ -614,7 +605,7 @@ func (p *AlibabaCloudProvider) equals(record alidns.Record, endpoint *endpoint.E
|
||||
ttl1 = 0
|
||||
}
|
||||
|
||||
ttl2 := int(endpoint.RecordTTL)
|
||||
ttl2 := int64(endpoint.RecordTTL)
|
||||
if ttl2 == defaultAlibabaCloudRecordTTL {
|
||||
ttl2 = 0
|
||||
}
|
||||
@ -623,7 +614,6 @@ func (p *AlibabaCloudProvider) equals(record alidns.Record, endpoint *endpoint.E
|
||||
}
|
||||
|
||||
func (p *AlibabaCloudProvider) updateRecords(recordMap map[string][]alidns.Record, endpoints []*endpoint.Endpoint) error {
|
||||
|
||||
for _, endpoint := range endpoints {
|
||||
key := p.getRecordKeyByEndpoint(endpoint)
|
||||
records := recordMap[key]
|
||||
@ -668,7 +658,6 @@ func (p *AlibabaCloudProvider) updateRecords(recordMap map[string][]alidns.Recor
|
||||
}
|
||||
|
||||
func (p *AlibabaCloudProvider) splitDNSName(endpoint *endpoint.Endpoint) (rr string, domain string) {
|
||||
|
||||
name := strings.TrimSuffix(endpoint.DNSName, ".")
|
||||
|
||||
found := false
|
||||
@ -728,7 +717,6 @@ func (p *AlibabaCloudProvider) matchVPC(zoneID string) bool {
|
||||
}
|
||||
|
||||
func (p *AlibabaCloudProvider) privateZones() ([]pvtz.Zone, error) {
|
||||
|
||||
var zones []pvtz.Zone
|
||||
|
||||
request := pvtz.CreateDescribeZonesRequest()
|
||||
@ -755,11 +743,11 @@ func (p *AlibabaCloudProvider) privateZones() ([]pvtz.Zone, error) {
|
||||
}
|
||||
zones = append(zones, zone)
|
||||
}
|
||||
nextPage := getNextPageNumber(response.PageNumber, defaultAlibabaCloudPageSize, response.TotalItems)
|
||||
nextPage := getNextPageNumber(int64(response.PageNumber), defaultAlibabaCloudPageSize, int64(response.TotalItems))
|
||||
if nextPage == 0 {
|
||||
break
|
||||
} else {
|
||||
request.PageNumber = requests.NewInteger(nextPage)
|
||||
request.PageNumber = requests.NewInteger64(nextPage)
|
||||
}
|
||||
}
|
||||
return zones, nil
|
||||
@ -783,7 +771,6 @@ func (p *AlibabaCloudProvider) getPrivateZones() (map[string]*alibabaPrivateZone
|
||||
}
|
||||
|
||||
for _, zone := range zones {
|
||||
|
||||
request := pvtz.CreateDescribeZoneRecordsRequest()
|
||||
request.ZoneId = zone.ZoneId
|
||||
request.PageSize = requests.NewInteger(defaultAlibabaCloudPageSize)
|
||||
@ -800,7 +787,6 @@ func (p *AlibabaCloudProvider) getPrivateZones() (map[string]*alibabaPrivateZone
|
||||
}
|
||||
|
||||
for _, record := range response.Records.Record {
|
||||
|
||||
recordType := record.Type
|
||||
|
||||
if !provider.SupportedRecordType(recordType) {
|
||||
@ -810,11 +796,11 @@ func (p *AlibabaCloudProvider) getPrivateZones() (map[string]*alibabaPrivateZone
|
||||
//TODO filter Locked
|
||||
records = append(records, record)
|
||||
}
|
||||
nextPage := getNextPageNumber(response.PageNumber, defaultAlibabaCloudPageSize, response.TotalItems)
|
||||
nextPage := getNextPageNumber(int64(response.PageNumber), defaultAlibabaCloudPageSize, int64(response.TotalItems))
|
||||
if nextPage == 0 {
|
||||
break
|
||||
} else {
|
||||
request.PageNumber = requests.NewInteger(nextPage)
|
||||
request.PageNumber = requests.NewInteger64(nextPage)
|
||||
}
|
||||
}
|
||||
|
||||
@ -830,7 +816,6 @@ func (p *AlibabaCloudProvider) getPrivateZones() (map[string]*alibabaPrivateZone
|
||||
}
|
||||
|
||||
func (p *AlibabaCloudProvider) groupPrivateZoneRecords(zone *alibabaPrivateZone) (endpointMap map[string][]pvtz.Record) {
|
||||
|
||||
endpointMap = make(map[string][]pvtz.Record)
|
||||
|
||||
for _, record := range zone.records {
|
||||
@ -846,7 +831,6 @@ func (p *AlibabaCloudProvider) groupPrivateZoneRecords(zone *alibabaPrivateZone)
|
||||
//
|
||||
// Returns the current records or an error if the operation failed.
|
||||
func (p *AlibabaCloudProvider) privateZoneRecords() (endpoints []*endpoint.Endpoint, _ error) {
|
||||
|
||||
zones, err := p.getPrivateZones()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -880,7 +864,7 @@ func (p *AlibabaCloudProvider) createPrivateZoneRecord(zones map[string]*alibaba
|
||||
rr, domain := p.splitDNSName(endpoint)
|
||||
zone := zones[domain]
|
||||
if zone == nil {
|
||||
err := fmt.Errorf("Failed to find private zone '%s'", domain)
|
||||
err := fmt.Errorf("failed to find private zone '%s'", domain)
|
||||
log.Errorf("Failed to create %s record named '%s' to '%s' for Alibaba Cloud Private Zone: %v", endpoint.RecordType, endpoint.DNSName, target, err)
|
||||
return err
|
||||
}
|
||||
@ -925,14 +909,13 @@ func (p *AlibabaCloudProvider) createPrivateZoneRecords(zones map[string]*alibab
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *AlibabaCloudProvider) deletePrivateZoneRecord(recordID int) error {
|
||||
|
||||
func (p *AlibabaCloudProvider) deletePrivateZoneRecord(recordID int64) error {
|
||||
if p.dryRun {
|
||||
log.Infof("Dry run: Delete record id '%d' in Alibaba Cloud Private Zone", recordID)
|
||||
}
|
||||
|
||||
request := pvtz.CreateDeleteZoneRecordRequest()
|
||||
request.RecordId = requests.NewInteger(recordID)
|
||||
request.RecordId = requests.NewInteger64(recordID)
|
||||
request.Domain = pVTZDoamin
|
||||
|
||||
response, err := p.getPvtzClient().DeleteZoneRecord(request)
|
||||
@ -950,7 +933,7 @@ func (p *AlibabaCloudProvider) deletePrivateZoneRecords(zones map[string]*alibab
|
||||
|
||||
zone := zones[domain]
|
||||
if zone == nil {
|
||||
err := fmt.Errorf("Failed to find private zone '%s'", domain)
|
||||
err := fmt.Errorf("failed to find private zone '%s'", domain)
|
||||
log.Errorf("Failed to delete %s record named '%s' for Alibaba Cloud Private Zone: %v", endpoint.RecordType, endpoint.DNSName, err)
|
||||
continue
|
||||
}
|
||||
@ -1001,7 +984,7 @@ func (p *AlibabaCloudProvider) applyChangesForPrivateZone(changes *plan.Changes)
|
||||
|
||||
func (p *AlibabaCloudProvider) updatePrivateZoneRecord(record pvtz.Record, endpoint *endpoint.Endpoint) error {
|
||||
request := pvtz.CreateUpdateZoneRecordRequest()
|
||||
request.RecordId = requests.NewInteger(record.RecordId)
|
||||
request.RecordId = requests.NewInteger64(record.RecordId)
|
||||
request.Rr = record.Rr
|
||||
request.Type = record.Type
|
||||
request.Value = record.Value
|
||||
@ -1034,12 +1017,11 @@ func (p *AlibabaCloudProvider) equalsPrivateZone(record pvtz.Record, endpoint *e
|
||||
}
|
||||
|
||||
func (p *AlibabaCloudProvider) updatePrivateZoneRecords(zones map[string]*alibabaPrivateZone, endpoints []*endpoint.Endpoint) error {
|
||||
|
||||
for _, endpoint := range endpoints {
|
||||
rr, domain := p.splitDNSName(endpoint)
|
||||
zone := zones[domain]
|
||||
if zone == nil {
|
||||
err := fmt.Errorf("Failed to find private zone '%s'", domain)
|
||||
err := fmt.Errorf("failed to find private zone '%s'", domain)
|
||||
log.Errorf("Failed to update %s record named '%s' for Alibaba Cloud Private Zone: %v", endpoint.RecordType, endpoint.DNSName, err)
|
||||
continue
|
||||
}
|
||||
|
@ -24,7 +24,6 @@ import (
|
||||
"github.com/aliyun/alibaba-cloud-sdk-go/services/pvtz"
|
||||
|
||||
"sigs.k8s.io/external-dns/endpoint"
|
||||
|
||||
"sigs.k8s.io/external-dns/plan"
|
||||
)
|
||||
|
||||
@ -61,7 +60,7 @@ func (m *MockAlibabaCloudDNSAPI) AddDomainRecord(request *alidns.AddDomainRecord
|
||||
RecordId: "3",
|
||||
DomainName: request.DomainName,
|
||||
Type: request.Type,
|
||||
TTL: ttl,
|
||||
TTL: int64(ttl),
|
||||
RR: request.RR,
|
||||
Value: request.Value,
|
||||
})
|
||||
@ -83,7 +82,7 @@ func (m *MockAlibabaCloudDNSAPI) DeleteDomainRecord(request *alidns.DeleteDomain
|
||||
}
|
||||
|
||||
func (m *MockAlibabaCloudDNSAPI) UpdateDomainRecord(request *alidns.UpdateDomainRecordRequest) (response *alidns.UpdateDomainRecordResponse, err error) {
|
||||
ttl, _ := request.TTL.GetValue()
|
||||
ttl, _ := request.TTL.GetValue64()
|
||||
for i := range m.records {
|
||||
if m.records[i].RecordId == request.RecordId {
|
||||
m.records[i].TTL = ttl
|
||||
@ -169,7 +168,7 @@ func (m *MockAlibabaCloudPrivateZoneAPI) AddZoneRecord(request *pvtz.AddZoneReco
|
||||
}
|
||||
|
||||
func (m *MockAlibabaCloudPrivateZoneAPI) DeleteZoneRecord(request *pvtz.DeleteZoneRecordRequest) (response *pvtz.DeleteZoneRecordResponse, err error) {
|
||||
recordID, _ := request.RecordId.GetValue()
|
||||
recordID, _ := request.RecordId.GetValue64()
|
||||
|
||||
var result []pvtz.Record
|
||||
for _, record := range m.records {
|
||||
@ -183,7 +182,7 @@ func (m *MockAlibabaCloudPrivateZoneAPI) DeleteZoneRecord(request *pvtz.DeleteZo
|
||||
}
|
||||
|
||||
func (m *MockAlibabaCloudPrivateZoneAPI) UpdateZoneRecord(request *pvtz.UpdateZoneRecordRequest) (response *pvtz.UpdateZoneRecordResponse, err error) {
|
||||
recordID, _ := request.RecordId.GetValue()
|
||||
recordID, _ := request.RecordId.GetValue64()
|
||||
ttl, _ := request.Ttl.GetValue()
|
||||
for i := range m.records {
|
||||
if m.records[i].RecordId == recordID {
|
||||
|
@ -52,8 +52,7 @@ const (
|
||||
)
|
||||
|
||||
var (
|
||||
// see: https://docs.aws.amazon.com/general/latest/gr/rande.html#elb_region
|
||||
// and: https://docs.aws.amazon.com/govcloud-us/latest/UserGuide/using-govcloud-endpoints.html
|
||||
// see: https://docs.aws.amazon.com/general/latest/gr/elb.html
|
||||
canonicalHostedZones = map[string]string{
|
||||
// Application Load Balancers and Classic Load Balancers
|
||||
"us-east-2.elb.amazonaws.com": "Z3AADJGX6KTTL2",
|
||||
@ -74,9 +73,10 @@ var (
|
||||
"eu-west-3.elb.amazonaws.com": "Z3Q77PNBQS71R4",
|
||||
"eu-north-1.elb.amazonaws.com": "Z23TAZ6LKFMNIO",
|
||||
"sa-east-1.elb.amazonaws.com": "Z2P70J7HTTTPLU",
|
||||
"cn-north-1.elb.amazonaws.com.cn": "Z3BX2TMKNYI13Y",
|
||||
"cn-northwest-1.elb.amazonaws.com.cn": "Z3BX2TMKNYI13Y",
|
||||
"us-gov-west-1.amazonaws.com": "Z1K6XKP9SAGWDV",
|
||||
"cn-north-1.elb.amazonaws.com.cn": "Z1GDH35T77C1KE",
|
||||
"cn-northwest-1.elb.amazonaws.com.cn": "ZM7IZAIOVVDZF",
|
||||
"us-gov-west-1.elb.amazonaws.com": "Z33AYJ8TM3BH4J",
|
||||
"us-gov-east-1.elb.amazonaws.com": "Z166TLBEWOO7G0",
|
||||
"me-south-1.elb.amazonaws.com": "ZS929ML54UICD",
|
||||
// Network Load Balancers
|
||||
"elb.us-east-2.amazonaws.com": "ZLMOA37VPKANP",
|
||||
@ -98,6 +98,8 @@ var (
|
||||
"elb.sa-east-1.amazonaws.com": "ZTK26PT1VY4CU",
|
||||
"elb.cn-north-1.amazonaws.com.cn": "Z3QFB96KMJ7ED6",
|
||||
"elb.cn-northwest-1.amazonaws.com.cn": "ZQEIKTCZ8352D",
|
||||
"elb.us-gov-west-1.amazonaws.com": "ZMG1MZ2THAWF1",
|
||||
"elb.us-gov-east-1.amazonaws.com": "Z1ZSMQQ6Q24QQ8",
|
||||
"elb.me-south-1.amazonaws.com": "Z3QSRYVP46NYYV",
|
||||
}
|
||||
)
|
||||
@ -114,6 +116,7 @@ type Route53API interface {
|
||||
|
||||
// AWSProvider is an implementation of Provider for AWS Route53.
|
||||
type AWSProvider struct {
|
||||
provider.BaseProvider
|
||||
client Route53API
|
||||
dryRun bool
|
||||
batchChangeSize int
|
||||
@ -450,7 +453,7 @@ func (p *AWSProvider) submitChanges(ctx context.Context, changes []*route53.Chan
|
||||
}
|
||||
|
||||
if len(failedZones) > 0 {
|
||||
return fmt.Errorf("Failed to submit all changes for the following zones: %v", failedZones)
|
||||
return fmt.Errorf("failed to submit all changes for the following zones: %v", failedZones)
|
||||
}
|
||||
|
||||
return nil
|
||||
@ -589,7 +592,8 @@ func (p *AWSProvider) tagsForZone(ctx context.Context, zoneID string) (map[strin
|
||||
|
||||
func batchChangeSet(cs []*route53.Change, batchSize int) [][]*route53.Change {
|
||||
if len(cs) <= batchSize {
|
||||
return [][]*route53.Change{cs}
|
||||
res := sortChangesByActionNameType(cs)
|
||||
return [][]*route53.Change{res}
|
||||
}
|
||||
|
||||
batchChanges := make([][]*route53.Change, 0)
|
||||
@ -636,10 +640,10 @@ func batchChangeSet(cs []*route53.Change, batchSize int) [][]*route53.Change {
|
||||
|
||||
func sortChangesByActionNameType(cs []*route53.Change) []*route53.Change {
|
||||
sort.SliceStable(cs, func(i, j int) bool {
|
||||
if *cs[i].Action < *cs[j].Action {
|
||||
if *cs[i].Action > *cs[j].Action {
|
||||
return true
|
||||
}
|
||||
if *cs[i].Action > *cs[j].Action {
|
||||
if *cs[i].Action < *cs[j].Action {
|
||||
return false
|
||||
}
|
||||
if *cs[i].ResourceRecordSet.Name < *cs[j].ResourceRecordSet.Name {
|
||||
@ -734,7 +738,6 @@ func isAWSAlias(ep *endpoint.Endpoint, addrs []*endpoint.Endpoint) string {
|
||||
if hostedZone := canonicalHostedZone(addr.Targets[0]); hostedZone != "" {
|
||||
return hostedZone
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1024,8 +1024,8 @@ func TestAWSCanonicalHostedZone(t *testing.T) {
|
||||
{"foo.eu-west-2.elb.amazonaws.com", "ZHURV8PSTC4K8"},
|
||||
{"foo.eu-west-3.elb.amazonaws.com", "Z3Q77PNBQS71R4"},
|
||||
{"foo.sa-east-1.elb.amazonaws.com", "Z2P70J7HTTTPLU"},
|
||||
{"foo.cn-north-1.elb.amazonaws.com.cn", "Z3BX2TMKNYI13Y"},
|
||||
{"foo.cn-northwest-1.elb.amazonaws.com.cn", "Z3BX2TMKNYI13Y"},
|
||||
{"foo.cn-north-1.elb.amazonaws.com.cn", "Z1GDH35T77C1KE"},
|
||||
{"foo.cn-northwest-1.elb.amazonaws.com.cn", "ZM7IZAIOVVDZF"},
|
||||
// Network Load Balancers
|
||||
{"foo.elb.us-east-2.amazonaws.com", "ZLMOA37VPKANP"},
|
||||
{"foo.elb.us-east-1.amazonaws.com", "Z26RNL4JYFTOTI"},
|
||||
|
@ -37,6 +37,7 @@ import (
|
||||
"sigs.k8s.io/external-dns/endpoint"
|
||||
"sigs.k8s.io/external-dns/pkg/apis/externaldns"
|
||||
"sigs.k8s.io/external-dns/plan"
|
||||
"sigs.k8s.io/external-dns/provider"
|
||||
)
|
||||
|
||||
const (
|
||||
@ -59,7 +60,7 @@ var (
|
||||
)
|
||||
|
||||
// AWSSDClient is the subset of the AWS Cloud Map API that we actually use. Add methods as required.
|
||||
// Signatures must match exactly. Taken from https://github.com/aws/aws-sdk-go/blob/master/service/servicediscovery/api.go
|
||||
// Signatures must match exactly. Taken from https://github.com/aws/aws-sdk-go/blob/HEAD/service/servicediscovery/api.go
|
||||
type AWSSDClient interface {
|
||||
CreateService(input *sd.CreateServiceInput) (*sd.CreateServiceOutput, error)
|
||||
DeregisterInstance(input *sd.DeregisterInstanceInput) (*sd.DeregisterInstanceOutput, error)
|
||||
@ -73,6 +74,7 @@ type AWSSDClient interface {
|
||||
|
||||
// AWSSDProvider is an implementation of Provider for AWS Cloud Map.
|
||||
type AWSSDProvider struct {
|
||||
provider.BaseProvider
|
||||
client AWSSDClient
|
||||
dryRun bool
|
||||
// only consider namespaces ending in this suffix
|
||||
|
@ -21,6 +21,7 @@ import (
|
||||
"errors"
|
||||
"math/rand"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@ -51,7 +52,7 @@ type AWSSDClientStub struct {
|
||||
func (s *AWSSDClientStub) CreateService(input *sd.CreateServiceInput) (*sd.CreateServiceOutput, error) {
|
||||
|
||||
srv := &sd.Service{
|
||||
Id: aws.String(string(rand.Intn(10000))),
|
||||
Id: aws.String(strconv.Itoa(rand.Intn(10000))),
|
||||
DnsConfig: input.DnsConfig,
|
||||
Name: input.Name,
|
||||
Description: input.Description,
|
||||
|
@ -67,6 +67,7 @@ type RecordSetsClient interface {
|
||||
|
||||
// AzureProvider implements the DNS provider for Microsoft's Azure cloud platform.
|
||||
type AzureProvider struct {
|
||||
provider.BaseProvider
|
||||
domainFilter endpoint.DomainFilter
|
||||
zoneIDFilter provider.ZoneIDFilter
|
||||
dryRun bool
|
||||
|
@ -46,6 +46,7 @@ type PrivateRecordSetsClient interface {
|
||||
|
||||
// AzurePrivateDNSProvider implements the DNS provider for Microsoft's Azure Private DNS service
|
||||
type AzurePrivateDNSProvider struct {
|
||||
provider.BaseProvider
|
||||
domainFilter endpoint.DomainFilter
|
||||
zoneIDFilter provider.ZoneIDFilter
|
||||
dryRun bool
|
||||
|
2
provider/cloudflare/OWNERS
Normal file
2
provider/cloudflare/OWNERS
Normal file
@ -0,0 +1,2 @@
|
||||
approvers:
|
||||
- sheerun
|
@ -20,7 +20,6 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
@ -59,6 +58,7 @@ type cloudFlareDNS interface {
|
||||
ZoneIDByName(zoneName string) (string, error)
|
||||
ListZones(zoneID ...string) ([]cloudflare.Zone, error)
|
||||
ListZonesContext(ctx context.Context, opts ...cloudflare.ReqOption) (cloudflare.ZonesResponse, error)
|
||||
ZoneDetails(zoneID string) (cloudflare.Zone, error)
|
||||
DNSRecords(zoneID string, rr cloudflare.DNSRecord) ([]cloudflare.DNSRecord, error)
|
||||
CreateDNSRecord(zoneID string, rr cloudflare.DNSRecord) (*cloudflare.DNSRecordResponse, error)
|
||||
DeleteDNSRecord(zoneID, recordID string) error
|
||||
@ -99,8 +99,13 @@ func (z zoneService) ListZonesContext(ctx context.Context, opts ...cloudflare.Re
|
||||
return z.service.ListZonesContext(ctx, opts...)
|
||||
}
|
||||
|
||||
func (z zoneService) ZoneDetails(zoneID string) (cloudflare.Zone, error) {
|
||||
return z.service.ZoneDetails(zoneID)
|
||||
}
|
||||
|
||||
// CloudFlareProvider is an implementation of Provider for CloudFlare DNS.
|
||||
type CloudFlareProvider struct {
|
||||
provider.BaseProvider
|
||||
Client cloudFlareDNS
|
||||
// only consider hosted zones managing domains ending in this suffix
|
||||
domainFilter endpoint.DomainFilter
|
||||
@ -112,8 +117,8 @@ type CloudFlareProvider struct {
|
||||
|
||||
// cloudFlareChange differentiates between ChangActions
|
||||
type cloudFlareChange struct {
|
||||
Action string
|
||||
ResourceRecordSet []cloudflare.DNSRecord
|
||||
Action string
|
||||
ResourceRecord cloudflare.DNSRecord
|
||||
}
|
||||
|
||||
// NewCloudFlareProvider initializes a new CloudFlare DNS based Provider.
|
||||
@ -151,6 +156,27 @@ func (p *CloudFlareProvider) Zones(ctx context.Context) ([]cloudflare.Zone, erro
|
||||
result := []cloudflare.Zone{}
|
||||
p.PaginationOptions.Page = 1
|
||||
|
||||
// if there is a zoneIDfilter configured
|
||||
// && if the filter isnt just a blank string (used in tests)
|
||||
if len(p.zoneIDFilter.ZoneIDs) > 0 && p.zoneIDFilter.ZoneIDs[0] != "" {
|
||||
log.Debugln("zoneIDFilter configured. only looking up zone IDs defined")
|
||||
for _, zoneID := range p.zoneIDFilter.ZoneIDs {
|
||||
log.Debugf("looking up zone %s", zoneID)
|
||||
detailResponse, err := p.Client.ZoneDetails(zoneID)
|
||||
if err != nil {
|
||||
log.Errorf("zone %s lookup failed, %v", zoneID, err)
|
||||
continue
|
||||
}
|
||||
log.WithFields(log.Fields{
|
||||
"zoneName": detailResponse.Name,
|
||||
"zoneID": detailResponse.ID,
|
||||
}).Debugln("adding zone for consideration")
|
||||
result = append(result, detailResponse)
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
log.Debugln("no zoneIDFilter configured, looking at all zones")
|
||||
for {
|
||||
zonesResponse, err := p.Client.ListZonesContext(ctx, cloudflare.WithPagination(p.PaginationOptions))
|
||||
if err != nil {
|
||||
@ -159,10 +185,7 @@ func (p *CloudFlareProvider) Zones(ctx context.Context) ([]cloudflare.Zone, erro
|
||||
|
||||
for _, zone := range zonesResponse.Result {
|
||||
if !p.domainFilter.Match(zone.Name) {
|
||||
continue
|
||||
}
|
||||
|
||||
if !p.zoneIDFilter.Match(zone.ID) {
|
||||
log.Debugf("zone %s not in domain filter", zone.Name)
|
||||
continue
|
||||
}
|
||||
result = append(result, zone)
|
||||
@ -200,15 +223,47 @@ func (p *CloudFlareProvider) Records(ctx context.Context) ([]*endpoint.Endpoint,
|
||||
|
||||
// ApplyChanges applies a given set of changes in a given zone.
|
||||
func (p *CloudFlareProvider) ApplyChanges(ctx context.Context, changes *plan.Changes) error {
|
||||
proxiedByDefault := p.proxiedByDefault
|
||||
cloudflareChanges := []*cloudFlareChange{}
|
||||
|
||||
combinedChanges := make([]*cloudFlareChange, 0, len(changes.Create)+len(changes.UpdateNew)+len(changes.Delete))
|
||||
for _, endpoint := range changes.Create {
|
||||
for _, target := range endpoint.Targets {
|
||||
cloudflareChanges = append(cloudflareChanges, p.newCloudFlareChange(cloudFlareCreate, endpoint, target))
|
||||
}
|
||||
}
|
||||
|
||||
combinedChanges = append(combinedChanges, newCloudFlareChanges(cloudFlareCreate, changes.Create, proxiedByDefault)...)
|
||||
combinedChanges = append(combinedChanges, newCloudFlareChanges(cloudFlareUpdate, changes.UpdateNew, proxiedByDefault)...)
|
||||
combinedChanges = append(combinedChanges, newCloudFlareChanges(cloudFlareDelete, changes.Delete, proxiedByDefault)...)
|
||||
for i, desired := range changes.UpdateNew {
|
||||
current := changes.UpdateOld[i]
|
||||
|
||||
return p.submitChanges(ctx, combinedChanges)
|
||||
add, remove, leave := provider.Difference(current.Targets, desired.Targets)
|
||||
|
||||
for _, a := range add {
|
||||
cloudflareChanges = append(cloudflareChanges, p.newCloudFlareChange(cloudFlareCreate, desired, a))
|
||||
}
|
||||
|
||||
for _, a := range leave {
|
||||
cloudflareChanges = append(cloudflareChanges, p.newCloudFlareChange(cloudFlareUpdate, desired, a))
|
||||
}
|
||||
|
||||
for _, a := range remove {
|
||||
cloudflareChanges = append(cloudflareChanges, p.newCloudFlareChange(cloudFlareDelete, current, a))
|
||||
}
|
||||
}
|
||||
|
||||
for _, endpoint := range changes.Delete {
|
||||
for _, target := range endpoint.Targets {
|
||||
cloudflareChanges = append(cloudflareChanges, p.newCloudFlareChange(cloudFlareDelete, endpoint, target))
|
||||
}
|
||||
}
|
||||
|
||||
return p.submitChanges(ctx, cloudflareChanges)
|
||||
}
|
||||
|
||||
func (p *CloudFlareProvider) PropertyValuesEqual(name string, previous string, current string) bool {
|
||||
if name == source.CloudflareProxiedKey {
|
||||
return plan.CompareBoolean(p.proxiedByDefault, name, previous, current)
|
||||
}
|
||||
|
||||
return p.BaseProvider.PropertyValuesEqual(name, previous, current)
|
||||
}
|
||||
|
||||
// submitChanges takes a zone and a collection of Changes and sends them as a single transaction.
|
||||
@ -232,12 +287,11 @@ func (p *CloudFlareProvider) submitChanges(ctx context.Context, changes []*cloud
|
||||
}
|
||||
for _, change := range changes {
|
||||
logFields := log.Fields{
|
||||
"record": change.ResourceRecordSet[0].Name,
|
||||
"type": change.ResourceRecordSet[0].Type,
|
||||
"ttl": change.ResourceRecordSet[0].TTL,
|
||||
"targets": len(change.ResourceRecordSet),
|
||||
"action": change.Action,
|
||||
"zone": zoneID,
|
||||
"record": change.ResourceRecord.Name,
|
||||
"type": change.ResourceRecord.Type,
|
||||
"ttl": change.ResourceRecord.TTL,
|
||||
"action": change.Action,
|
||||
"zone": zoneID,
|
||||
}
|
||||
|
||||
log.WithFields(logFields).Info("Changing record.")
|
||||
@ -246,24 +300,30 @@ func (p *CloudFlareProvider) submitChanges(ctx context.Context, changes []*cloud
|
||||
continue
|
||||
}
|
||||
|
||||
recordIDs := p.getRecordIDs(records, change.ResourceRecordSet[0])
|
||||
|
||||
// to simplify bookkeeping for multiple records, an update is executed as delete+create
|
||||
if change.Action == cloudFlareDelete || change.Action == cloudFlareUpdate {
|
||||
for _, recordID := range recordIDs {
|
||||
err := p.Client.DeleteDNSRecord(zoneID, recordID)
|
||||
if err != nil {
|
||||
log.WithFields(logFields).Errorf("failed to delete record: %v", err)
|
||||
}
|
||||
if change.Action == cloudFlareUpdate {
|
||||
recordID := p.getRecordID(records, change.ResourceRecord)
|
||||
if recordID == "" {
|
||||
log.WithFields(logFields).Errorf("failed to find previous record: %v", change.ResourceRecord)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if change.Action == cloudFlareCreate || change.Action == cloudFlareUpdate {
|
||||
for _, record := range change.ResourceRecordSet {
|
||||
_, err := p.Client.CreateDNSRecord(zoneID, record)
|
||||
if err != nil {
|
||||
log.WithFields(logFields).Errorf("failed to create record: %v", err)
|
||||
}
|
||||
err := p.Client.UpdateDNSRecord(zoneID, recordID, change.ResourceRecord)
|
||||
if err != nil {
|
||||
log.WithFields(logFields).Errorf("failed to delete record: %v", err)
|
||||
}
|
||||
} else if change.Action == cloudFlareDelete {
|
||||
recordID := p.getRecordID(records, change.ResourceRecord)
|
||||
if recordID == "" {
|
||||
log.WithFields(logFields).Errorf("failed to find previous record: %v", change.ResourceRecord)
|
||||
continue
|
||||
}
|
||||
err := p.Client.DeleteDNSRecord(zoneID, recordID)
|
||||
if err != nil {
|
||||
log.WithFields(logFields).Errorf("failed to delete record: %v", err)
|
||||
}
|
||||
} else if change.Action == cloudFlareCreate {
|
||||
_, err := p.Client.CreateDNSRecord(zoneID, change.ResourceRecord)
|
||||
if err != nil {
|
||||
log.WithFields(logFields).Errorf("failed to create record: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -282,9 +342,9 @@ func (p *CloudFlareProvider) changesByZone(zones []cloudflare.Zone, changeSet []
|
||||
}
|
||||
|
||||
for _, c := range changeSet {
|
||||
zoneID, _ := zoneNameIDMapper.FindZone(c.ResourceRecordSet[0].Name)
|
||||
zoneID, _ := zoneNameIDMapper.FindZone(c.ResourceRecord.Name)
|
||||
if zoneID == "" {
|
||||
log.Debugf("Skipping record %s because no hosted zone matching record DNS Name was detected", c.ResourceRecordSet[0].Name)
|
||||
log.Debugf("Skipping record %s because no hosted zone matching record DNS Name was detected", c.ResourceRecord.Name)
|
||||
continue
|
||||
}
|
||||
changes[zoneID] = append(changes[zoneID], c)
|
||||
@ -293,51 +353,36 @@ func (p *CloudFlareProvider) changesByZone(zones []cloudflare.Zone, changeSet []
|
||||
return changes
|
||||
}
|
||||
|
||||
func (p *CloudFlareProvider) getRecordIDs(records []cloudflare.DNSRecord, record cloudflare.DNSRecord) []string {
|
||||
recordIDs := make([]string, 0)
|
||||
func (p *CloudFlareProvider) getRecordID(records []cloudflare.DNSRecord, record cloudflare.DNSRecord) string {
|
||||
for _, zoneRecord := range records {
|
||||
if zoneRecord.Name == record.Name && zoneRecord.Type == record.Type {
|
||||
recordIDs = append(recordIDs, zoneRecord.ID)
|
||||
if zoneRecord.Name == record.Name && zoneRecord.Type == record.Type && zoneRecord.Content == record.Content {
|
||||
return zoneRecord.ID
|
||||
}
|
||||
}
|
||||
sort.Strings(recordIDs)
|
||||
return recordIDs
|
||||
return ""
|
||||
}
|
||||
|
||||
// newCloudFlareChanges returns a collection of Changes based on the given records and action.
|
||||
func newCloudFlareChanges(action string, endpoints []*endpoint.Endpoint, proxiedByDefault bool) []*cloudFlareChange {
|
||||
changes := make([]*cloudFlareChange, 0, len(endpoints))
|
||||
|
||||
for _, endpoint := range endpoints {
|
||||
changes = append(changes, newCloudFlareChange(action, endpoint, proxiedByDefault))
|
||||
}
|
||||
|
||||
return changes
|
||||
}
|
||||
|
||||
func newCloudFlareChange(action string, endpoint *endpoint.Endpoint, proxiedByDefault bool) *cloudFlareChange {
|
||||
func (p *CloudFlareProvider) newCloudFlareChange(action string, endpoint *endpoint.Endpoint, target string) *cloudFlareChange {
|
||||
ttl := defaultCloudFlareRecordTTL
|
||||
proxied := shouldBeProxied(endpoint, proxiedByDefault)
|
||||
proxied := shouldBeProxied(endpoint, p.proxiedByDefault)
|
||||
|
||||
if endpoint.RecordTTL.IsConfigured() {
|
||||
ttl = int(endpoint.RecordTTL)
|
||||
}
|
||||
|
||||
resourceRecordSet := make([]cloudflare.DNSRecord, len(endpoint.Targets))
|
||||
if len(endpoint.Targets) > 1 {
|
||||
log.Errorf("Updates should have just one target")
|
||||
}
|
||||
|
||||
for i := range endpoint.Targets {
|
||||
resourceRecordSet[i] = cloudflare.DNSRecord{
|
||||
return &cloudFlareChange{
|
||||
Action: action,
|
||||
ResourceRecord: cloudflare.DNSRecord{
|
||||
Name: endpoint.DNSName,
|
||||
TTL: ttl,
|
||||
Proxied: proxied,
|
||||
Type: endpoint.RecordType,
|
||||
Content: endpoint.Targets[i],
|
||||
}
|
||||
}
|
||||
|
||||
return &cloudFlareChange{
|
||||
Action: action,
|
||||
ResourceRecordSet: resourceRecordSet,
|
||||
Content: target,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -57,6 +57,15 @@ var ExampleDomain = []cloudflare.DNSRecord{
|
||||
Content: "1.2.3.4",
|
||||
Proxied: false,
|
||||
},
|
||||
{
|
||||
ID: "2345678901",
|
||||
ZoneID: "001",
|
||||
Name: "foobar.bar.com",
|
||||
Type: endpoint.RecordTypeA,
|
||||
TTL: 120,
|
||||
Content: "3.4.5.6",
|
||||
Proxied: false,
|
||||
},
|
||||
{
|
||||
ID: "1231231233",
|
||||
ZoneID: "002",
|
||||
@ -207,6 +216,19 @@ func (m *mockCloudFlareClient) ListZonesContext(ctx context.Context, opts ...clo
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (m *mockCloudFlareClient) ZoneDetails(zoneID string) (cloudflare.Zone, error) {
|
||||
for id, zoneName := range m.Zones {
|
||||
if zoneID == id {
|
||||
return cloudflare.Zone{
|
||||
ID: zoneID,
|
||||
Name: zoneName,
|
||||
}, nil
|
||||
}
|
||||
}
|
||||
|
||||
return cloudflare.Zone{}, errors.New("Unknown zoneID: " + zoneID)
|
||||
}
|
||||
|
||||
func AssertActions(t *testing.T, provider *CloudFlareProvider, endpoints []*endpoint.Endpoint, actions []MockAction, args ...interface{}) {
|
||||
t.Helper()
|
||||
|
||||
@ -523,6 +545,25 @@ func TestCloudflareZones(t *testing.T) {
|
||||
assert.Equal(t, "bar.com", zones[0].Name)
|
||||
}
|
||||
|
||||
func TestCloudFlareZonesWithIDFilter(t *testing.T) {
|
||||
client := NewMockCloudFlareClient()
|
||||
client.listZonesError = errors.New("shouldn't need to list zones when ZoneIDFilter in use")
|
||||
provider := &CloudFlareProvider{
|
||||
Client: client,
|
||||
domainFilter: endpoint.NewDomainFilter([]string{"bar.com", "foo.com"}),
|
||||
zoneIDFilter: provider.NewZoneIDFilter([]string{"001"}),
|
||||
}
|
||||
|
||||
zones, err := provider.Zones(context.Background())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// foo.com should *not* be returned as it doesn't match ZoneID filter
|
||||
assert.Equal(t, 1, len(zones))
|
||||
assert.Equal(t, "bar.com", zones[0].Name)
|
||||
}
|
||||
|
||||
func TestCloudflareRecords(t *testing.T) {
|
||||
client := NewMockCloudFlareClientWithRecords(map[string][]cloudflare.DNSRecord{
|
||||
"001": ExampleDomain,
|
||||
@ -656,29 +697,51 @@ func TestCloudflareGetRecordID(t *testing.T) {
|
||||
p := &CloudFlareProvider{}
|
||||
records := []cloudflare.DNSRecord{
|
||||
{
|
||||
Name: "foo.com",
|
||||
Type: endpoint.RecordTypeCNAME,
|
||||
ID: "1",
|
||||
Name: "foo.com",
|
||||
Type: endpoint.RecordTypeCNAME,
|
||||
Content: "foobar",
|
||||
ID: "1",
|
||||
},
|
||||
{
|
||||
Name: "bar.de",
|
||||
Type: endpoint.RecordTypeA,
|
||||
ID: "2",
|
||||
},
|
||||
{
|
||||
Name: "bar.de",
|
||||
Type: endpoint.RecordTypeA,
|
||||
Content: "1.2.3.4",
|
||||
ID: "2",
|
||||
},
|
||||
}
|
||||
|
||||
assert.Len(t, p.getRecordIDs(records, cloudflare.DNSRecord{
|
||||
Name: "foo.com",
|
||||
Type: endpoint.RecordTypeA,
|
||||
}), 0)
|
||||
assert.Len(t, p.getRecordIDs(records, cloudflare.DNSRecord{
|
||||
Name: "bar.de",
|
||||
Type: endpoint.RecordTypeA,
|
||||
}), 1)
|
||||
assert.Equal(t, "2", p.getRecordIDs(records, cloudflare.DNSRecord{
|
||||
Name: "bar.de",
|
||||
Type: endpoint.RecordTypeA,
|
||||
})[0])
|
||||
assert.Equal(t, "", p.getRecordID(records, cloudflare.DNSRecord{
|
||||
Name: "foo.com",
|
||||
Type: endpoint.RecordTypeA,
|
||||
Content: "foobar",
|
||||
}))
|
||||
|
||||
assert.Equal(t, "", p.getRecordID(records, cloudflare.DNSRecord{
|
||||
Name: "foo.com",
|
||||
Type: endpoint.RecordTypeCNAME,
|
||||
Content: "fizfuz",
|
||||
}))
|
||||
|
||||
assert.Equal(t, "1", p.getRecordID(records, cloudflare.DNSRecord{
|
||||
Name: "foo.com",
|
||||
Type: endpoint.RecordTypeCNAME,
|
||||
Content: "foobar",
|
||||
}))
|
||||
assert.Equal(t, "", p.getRecordID(records, cloudflare.DNSRecord{
|
||||
Name: "bar.de",
|
||||
Type: endpoint.RecordTypeA,
|
||||
Content: "2.3.4.5",
|
||||
}))
|
||||
assert.Equal(t, "2", p.getRecordID(records, cloudflare.DNSRecord{
|
||||
Name: "bar.de",
|
||||
Type: endpoint.RecordTypeA,
|
||||
Content: "1.2.3.4",
|
||||
}))
|
||||
}
|
||||
|
||||
func TestCloudflareGroupByNameAndType(t *testing.T) {
|
||||
@ -903,6 +966,98 @@ func TestCloudflareGroupByNameAndType(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestProviderPropertiesIdempotency(t *testing.T) {
|
||||
testCases := []struct {
|
||||
ProviderProxiedByDefault bool
|
||||
RecordsAreProxied bool
|
||||
ShouldBeUpdated bool
|
||||
}{
|
||||
{
|
||||
ProviderProxiedByDefault: false,
|
||||
RecordsAreProxied: false,
|
||||
ShouldBeUpdated: false,
|
||||
},
|
||||
{
|
||||
ProviderProxiedByDefault: true,
|
||||
RecordsAreProxied: true,
|
||||
ShouldBeUpdated: false,
|
||||
},
|
||||
{
|
||||
ProviderProxiedByDefault: true,
|
||||
RecordsAreProxied: false,
|
||||
ShouldBeUpdated: true,
|
||||
},
|
||||
{
|
||||
ProviderProxiedByDefault: false,
|
||||
RecordsAreProxied: true,
|
||||
ShouldBeUpdated: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range testCases {
|
||||
client := NewMockCloudFlareClientWithRecords(map[string][]cloudflare.DNSRecord{
|
||||
"001": {
|
||||
{
|
||||
ID: "1234567890",
|
||||
ZoneID: "001",
|
||||
Name: "foobar.bar.com",
|
||||
Type: endpoint.RecordTypeA,
|
||||
TTL: 120,
|
||||
Content: "1.2.3.4",
|
||||
Proxied: test.RecordsAreProxied,
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
provider := &CloudFlareProvider{
|
||||
Client: client,
|
||||
proxiedByDefault: test.ProviderProxiedByDefault,
|
||||
}
|
||||
ctx := context.Background()
|
||||
|
||||
current, err := provider.Records(ctx)
|
||||
if err != nil {
|
||||
t.Errorf("should not fail, %s", err)
|
||||
}
|
||||
assert.Equal(t, 1, len(current))
|
||||
|
||||
desired := []*endpoint.Endpoint{}
|
||||
for _, c := range current {
|
||||
// Copy all except ProviderSpecific fields
|
||||
desired = append(desired, &endpoint.Endpoint{
|
||||
DNSName: c.DNSName,
|
||||
Targets: c.Targets,
|
||||
RecordType: c.RecordType,
|
||||
SetIdentifier: c.SetIdentifier,
|
||||
RecordTTL: c.RecordTTL,
|
||||
Labels: c.Labels,
|
||||
})
|
||||
}
|
||||
|
||||
plan := plan.Plan{
|
||||
Current: current,
|
||||
Desired: desired,
|
||||
PropertyComparator: provider.PropertyValuesEqual,
|
||||
}
|
||||
|
||||
plan = *plan.Calculate()
|
||||
assert.NotNil(t, plan.Changes, "should have plan")
|
||||
if plan.Changes == nil {
|
||||
return
|
||||
}
|
||||
assert.Equal(t, 0, len(plan.Changes.Create), "should not have creates")
|
||||
assert.Equal(t, 0, len(plan.Changes.Delete), "should not have deletes")
|
||||
|
||||
if test.ShouldBeUpdated {
|
||||
assert.Equal(t, 1, len(plan.Changes.UpdateNew), "should not have new updates")
|
||||
assert.Equal(t, 1, len(plan.Changes.UpdateOld), "should not have old updates")
|
||||
} else {
|
||||
assert.Equal(t, 0, len(plan.Changes.UpdateNew), "should not have new updates")
|
||||
assert.Equal(t, 0, len(plan.Changes.UpdateOld), "should not have old updates")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestCloudflareComplexUpdate(t *testing.T) {
|
||||
client := NewMockCloudFlareClientWithRecords(map[string][]cloudflare.DNSRecord{
|
||||
"001": ExampleDomain,
|
||||
@ -948,23 +1103,7 @@ func TestCloudflareComplexUpdate(t *testing.T) {
|
||||
}
|
||||
|
||||
td.CmpDeeply(t, client.Actions, []MockAction{
|
||||
{
|
||||
Name: "Delete",
|
||||
ZoneId: "001",
|
||||
RecordId: "1234567890",
|
||||
},
|
||||
{
|
||||
Name: "Create",
|
||||
ZoneId: "001",
|
||||
RecordData: cloudflare.DNSRecord{
|
||||
Name: "foobar.bar.com",
|
||||
Type: "A",
|
||||
Content: "1.2.3.4",
|
||||
TTL: 1,
|
||||
Proxied: true,
|
||||
},
|
||||
},
|
||||
{
|
||||
MockAction{
|
||||
Name: "Create",
|
||||
ZoneId: "001",
|
||||
RecordData: cloudflare.DNSRecord{
|
||||
@ -975,5 +1114,22 @@ func TestCloudflareComplexUpdate(t *testing.T) {
|
||||
Proxied: true,
|
||||
},
|
||||
},
|
||||
MockAction{
|
||||
Name: "Update",
|
||||
ZoneId: "001",
|
||||
RecordId: "1234567890",
|
||||
RecordData: cloudflare.DNSRecord{
|
||||
Name: "foobar.bar.com",
|
||||
Type: "A",
|
||||
Content: "1.2.3.4",
|
||||
TTL: 1,
|
||||
Proxied: true,
|
||||
},
|
||||
},
|
||||
MockAction{
|
||||
Name: "Delete",
|
||||
ZoneId: "001",
|
||||
RecordId: "2345678901",
|
||||
},
|
||||
})
|
||||
}
|
||||
|
2
provider/coredns/OWNERS
Normal file
2
provider/coredns/OWNERS
Normal file
@ -0,0 +1,2 @@
|
||||
approvers:
|
||||
- ytsarev
|
@ -57,6 +57,7 @@ type coreDNSClient interface {
|
||||
}
|
||||
|
||||
type coreDNSProvider struct {
|
||||
provider.BaseProvider
|
||||
dryRun bool
|
||||
coreDNSPrefix string
|
||||
domainFilter endpoint.DomainFilter
|
||||
@ -85,7 +86,7 @@ type Service struct {
|
||||
// answer.
|
||||
Group string `json:"group,omitempty"`
|
||||
|
||||
// Etcd key where we found this service and ignored from json un-/marshalling
|
||||
// Etcd key where we found this service and ignored from json un-/marshaling
|
||||
Key string `json:"-"`
|
||||
}
|
||||
|
||||
@ -396,7 +397,6 @@ func (p coreDNSProvider) ApplyChanges(ctx context.Context, changes *plan.Changes
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
index := 0
|
||||
for _, ep := range group {
|
||||
|
@ -227,6 +227,7 @@ func (c designateClient) DeleteRecordSet(zoneID, recordSetID string) error {
|
||||
|
||||
// designate provider type
|
||||
type designateProvider struct {
|
||||
provider.BaseProvider
|
||||
client designateClientInterface
|
||||
|
||||
// only consider hosted zones managing domains ending in this suffix
|
||||
|
@ -32,48 +32,66 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
// DigitalOceanCreate is a ChangeAction enum value
|
||||
DigitalOceanCreate = "CREATE"
|
||||
// DigitalOceanDelete is a ChangeAction enum value
|
||||
DigitalOceanDelete = "DELETE"
|
||||
// DigitalOceanUpdate is a ChangeAction enum value
|
||||
DigitalOceanUpdate = "UPDATE"
|
||||
|
||||
// digitalOceanRecordTTL is the default TTL value
|
||||
digitalOceanRecordTTL = 300
|
||||
)
|
||||
|
||||
// DigitalOceanProvider is an implementation of Provider for Digital Ocean's DNS.
|
||||
type DigitalOceanProvider struct {
|
||||
provider.BaseProvider
|
||||
Client godo.DomainsService
|
||||
// only consider hosted zones managing domains ending in this suffix
|
||||
domainFilter endpoint.DomainFilter
|
||||
DryRun bool
|
||||
// page size when querying paginated APIs
|
||||
apiPageSize int
|
||||
DryRun bool
|
||||
}
|
||||
|
||||
// DigitalOceanChange differentiates between ChangActions
|
||||
type DigitalOceanChange struct {
|
||||
Action string
|
||||
ResourceRecordSet godo.DomainRecord
|
||||
type digitalOceanChangeCreate struct {
|
||||
Domain string
|
||||
Options *godo.DomainRecordEditRequest
|
||||
}
|
||||
|
||||
type digitalOceanChangeUpdate struct {
|
||||
Domain string
|
||||
DomainRecord godo.DomainRecord
|
||||
Options *godo.DomainRecordEditRequest
|
||||
}
|
||||
|
||||
type digitalOceanChangeDelete struct {
|
||||
Domain string
|
||||
RecordID int
|
||||
}
|
||||
|
||||
// DigitalOceanChange contains all changes to apply to DNS
|
||||
type digitalOceanChanges struct {
|
||||
Creates []*digitalOceanChangeCreate
|
||||
Updates []*digitalOceanChangeUpdate
|
||||
Deletes []*digitalOceanChangeDelete
|
||||
}
|
||||
|
||||
func (c *digitalOceanChanges) Empty() bool {
|
||||
return len(c.Creates) == 0 && len(c.Updates) == 0 && len(c.Deletes) == 0
|
||||
}
|
||||
|
||||
// NewDigitalOceanProvider initializes a new DigitalOcean DNS based Provider.
|
||||
func NewDigitalOceanProvider(ctx context.Context, domainFilter endpoint.DomainFilter, dryRun bool) (*DigitalOceanProvider, error) {
|
||||
func NewDigitalOceanProvider(ctx context.Context, domainFilter endpoint.DomainFilter, dryRun bool, apiPageSize int) (*DigitalOceanProvider, error) {
|
||||
token, ok := os.LookupEnv("DO_TOKEN")
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("No token found")
|
||||
return nil, fmt.Errorf("no token found")
|
||||
}
|
||||
oauthClient := oauth2.NewClient(ctx, oauth2.StaticTokenSource(&oauth2.Token{
|
||||
AccessToken: token,
|
||||
}))
|
||||
client := godo.NewClient(oauthClient)
|
||||
|
||||
provider := &DigitalOceanProvider{
|
||||
p := &DigitalOceanProvider{
|
||||
Client: client.Domains,
|
||||
domainFilter: domainFilter,
|
||||
apiPageSize: apiPageSize,
|
||||
DryRun: dryRun,
|
||||
}
|
||||
return provider, nil
|
||||
return p, nil
|
||||
}
|
||||
|
||||
// Zones returns the list of hosted zones.
|
||||
@ -94,12 +112,45 @@ func (p *DigitalOceanProvider) Zones(ctx context.Context) ([]godo.Domain, error)
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// Merge Endpoints with the same Name and Type into a single endpoint with multiple Targets.
|
||||
func mergeEndpointsByNameType(endpoints []*endpoint.Endpoint) []*endpoint.Endpoint {
|
||||
endpointsByNameType := map[string][]*endpoint.Endpoint{}
|
||||
|
||||
for _, e := range endpoints {
|
||||
key := fmt.Sprintf("%s-%s", e.DNSName, e.RecordType)
|
||||
endpointsByNameType[key] = append(endpointsByNameType[key], e)
|
||||
}
|
||||
|
||||
// If no merge occurred, just return the existing endpoints.
|
||||
if len(endpointsByNameType) == len(endpoints) {
|
||||
return endpoints
|
||||
}
|
||||
|
||||
// Otherwise, construct a new list of endpoints with the endpoints merged.
|
||||
var result []*endpoint.Endpoint
|
||||
for _, endpoints := range endpointsByNameType {
|
||||
dnsName := endpoints[0].DNSName
|
||||
recordType := endpoints[0].RecordType
|
||||
|
||||
targets := make([]string, len(endpoints))
|
||||
for i, e := range endpoints {
|
||||
targets[i] = e.Targets[0]
|
||||
}
|
||||
|
||||
e := endpoint.NewEndpoint(dnsName, recordType, targets...)
|
||||
result = append(result, e)
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// Records returns the list of records in a given zone.
|
||||
func (p *DigitalOceanProvider) Records(ctx context.Context) ([]*endpoint.Endpoint, error) {
|
||||
zones, err := p.Zones(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
endpoints := []*endpoint.Endpoint{}
|
||||
for _, zone := range zones {
|
||||
records, err := p.fetchRecords(ctx, zone.Name)
|
||||
@ -117,17 +168,27 @@ func (p *DigitalOceanProvider) Records(ctx context.Context) ([]*endpoint.Endpoin
|
||||
name = zone.Name
|
||||
}
|
||||
|
||||
endpoints = append(endpoints, endpoint.NewEndpoint(name, r.Type, r.Data))
|
||||
ep := endpoint.NewEndpoint(name, r.Type, r.Data)
|
||||
endpoints = append(endpoints, ep)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Merge endpoints with the same name and type (e.g., multiple A records for a single
|
||||
// DNS name) into one endpoint with multiple targets.
|
||||
endpoints = mergeEndpointsByNameType(endpoints)
|
||||
|
||||
// Log the endpoints that were found.
|
||||
log.WithFields(log.Fields{
|
||||
"endpoints": endpoints,
|
||||
}).Debug("Endpoints generated from DigitalOcean DNS")
|
||||
|
||||
return endpoints, nil
|
||||
}
|
||||
|
||||
func (p *DigitalOceanProvider) fetchRecords(ctx context.Context, zoneName string) ([]godo.DomainRecord, error) {
|
||||
allRecords := []godo.DomainRecord{}
|
||||
listOptions := &godo.ListOptions{}
|
||||
listOptions := &godo.ListOptions{PerPage: p.apiPageSize}
|
||||
for {
|
||||
records, resp, err := p.Client.Records(ctx, zoneName, listOptions)
|
||||
if err != nil {
|
||||
@ -152,7 +213,7 @@ func (p *DigitalOceanProvider) fetchRecords(ctx context.Context, zoneName string
|
||||
|
||||
func (p *DigitalOceanProvider) fetchZones(ctx context.Context) ([]godo.Domain, error) {
|
||||
allZones := []godo.Domain{}
|
||||
listOptions := &godo.ListOptions{}
|
||||
listOptions := &godo.ListOptions{PerPage: p.apiPageSize}
|
||||
for {
|
||||
zones, resp, err := p.Client.List(ctx, listOptions)
|
||||
if err != nil {
|
||||
@ -175,160 +236,379 @@ func (p *DigitalOceanProvider) fetchZones(ctx context.Context) ([]godo.Domain, e
|
||||
return allZones, nil
|
||||
}
|
||||
|
||||
// submitChanges takes a zone and a collection of Changes and sends them as a single transaction.
|
||||
func (p *DigitalOceanProvider) submitChanges(ctx context.Context, changes []*DigitalOceanChange) error {
|
||||
func (p *DigitalOceanProvider) getRecordsByDomain(ctx context.Context) (map[string][]godo.DomainRecord, provider.ZoneIDName, error) {
|
||||
recordsByDomain := map[string][]godo.DomainRecord{}
|
||||
|
||||
zones, err := p.Zones(ctx)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
zonesByDomain := make(map[string]godo.Domain)
|
||||
zoneNameIDMapper := provider.ZoneIDName{}
|
||||
for _, z := range zones {
|
||||
zoneNameIDMapper.Add(z.Name, z.Name)
|
||||
zonesByDomain[z.Name] = z
|
||||
}
|
||||
|
||||
// Fetch records for each zone
|
||||
for _, zone := range zones {
|
||||
records, err := p.fetchRecords(ctx, zone.Name)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
recordsByDomain[zone.Name] = append(recordsByDomain[zone.Name], records...)
|
||||
}
|
||||
|
||||
return recordsByDomain, zoneNameIDMapper, nil
|
||||
}
|
||||
|
||||
// Make a DomainRecordEditRequest that conforms to DigitalOcean API requirements:
|
||||
// - Records at root of the zone have `@` as the name
|
||||
// - CNAME records must end in a `.`
|
||||
func makeDomainEditRequest(domain, name, recordType, data string, ttl int) *godo.DomainRecordEditRequest {
|
||||
// Trim the domain off the name if present.
|
||||
adjustedName := strings.TrimSuffix(name, "."+domain)
|
||||
|
||||
// Record at the root should be defined as @ instead of the full domain name.
|
||||
if adjustedName == domain {
|
||||
adjustedName = "@"
|
||||
}
|
||||
|
||||
// For some reason the DO API requires the '.' at the end of "data" in case of CNAME request.
|
||||
// Example: {"type":"CNAME","name":"hello","data":"www.example.com."}
|
||||
if recordType == endpoint.RecordTypeCNAME && !strings.HasSuffix(data, ".") {
|
||||
data += "."
|
||||
}
|
||||
|
||||
return &godo.DomainRecordEditRequest{
|
||||
Name: adjustedName,
|
||||
Type: recordType,
|
||||
Data: data,
|
||||
TTL: ttl,
|
||||
}
|
||||
}
|
||||
|
||||
// submitChanges applies an instance of `digitalOceanChanges` to the DigitalOcean API.
|
||||
func (p *DigitalOceanProvider) submitChanges(ctx context.Context, changes *digitalOceanChanges) error {
|
||||
// return early if there is nothing to change
|
||||
if len(changes) == 0 {
|
||||
if changes.Empty() {
|
||||
return nil
|
||||
}
|
||||
|
||||
zones, err := p.Zones(ctx)
|
||||
for _, c := range changes.Creates {
|
||||
log.WithFields(log.Fields{
|
||||
"domain": c.Domain,
|
||||
"dnsName": c.Options.Name,
|
||||
"recordType": c.Options.Type,
|
||||
"data": c.Options.Data,
|
||||
"ttl": c.Options.TTL,
|
||||
}).Debug("Creating domain record")
|
||||
|
||||
if p.DryRun {
|
||||
continue
|
||||
}
|
||||
|
||||
_, _, err := p.Client.CreateRecord(ctx, c.Domain, c.Options)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
for _, u := range changes.Updates {
|
||||
log.WithFields(log.Fields{
|
||||
"domain": u.Domain,
|
||||
"dnsName": u.Options.Name,
|
||||
"recordType": u.Options.Type,
|
||||
"data": u.Options.Data,
|
||||
"ttl": u.Options.TTL,
|
||||
}).Debug("Updating domain record")
|
||||
|
||||
if p.DryRun {
|
||||
continue
|
||||
}
|
||||
|
||||
_, _, err := p.Client.EditRecord(ctx, u.Domain, u.DomainRecord.ID, u.Options)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
for _, d := range changes.Deletes {
|
||||
log.WithFields(log.Fields{
|
||||
"domain": d.Domain,
|
||||
"recordId": d.RecordID,
|
||||
}).Debug("Deleting domain record")
|
||||
|
||||
if p.DryRun {
|
||||
continue
|
||||
}
|
||||
|
||||
_, err := p.Client.DeleteRecord(ctx, d.Domain, d.RecordID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func getTTLFromEndpoint(ep *endpoint.Endpoint) int {
|
||||
if ep.RecordTTL.IsConfigured() {
|
||||
return int(ep.RecordTTL)
|
||||
}
|
||||
return digitalOceanRecordTTL
|
||||
}
|
||||
|
||||
func endpointsByZone(zoneNameIDMapper provider.ZoneIDName, endpoints []*endpoint.Endpoint) map[string][]*endpoint.Endpoint {
|
||||
endpointsByZone := make(map[string][]*endpoint.Endpoint)
|
||||
|
||||
for _, ep := range endpoints {
|
||||
zoneID, _ := zoneNameIDMapper.FindZone(ep.DNSName)
|
||||
if zoneID == "" {
|
||||
log.Debugf("Skipping record %s because no hosted zone matching record DNS Name was detected", ep.DNSName)
|
||||
continue
|
||||
}
|
||||
endpointsByZone[zoneID] = append(endpointsByZone[zoneID], ep)
|
||||
}
|
||||
|
||||
return endpointsByZone
|
||||
}
|
||||
|
||||
func getMatchingDomainRecords(records []godo.DomainRecord, domain string, ep *endpoint.Endpoint) []godo.DomainRecord {
|
||||
var name string
|
||||
if ep.DNSName != domain {
|
||||
name = strings.TrimSuffix(ep.DNSName, "."+domain)
|
||||
} else {
|
||||
name = "@"
|
||||
}
|
||||
|
||||
var result []godo.DomainRecord
|
||||
for _, r := range records {
|
||||
if r.Name == name && r.Type == ep.RecordType {
|
||||
result = append(result, r)
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func processCreateActions(
|
||||
recordsByDomain map[string][]godo.DomainRecord,
|
||||
createsByDomain map[string][]*endpoint.Endpoint,
|
||||
changes *digitalOceanChanges,
|
||||
) error {
|
||||
// Process endpoints that need to be created.
|
||||
for domain, endpoints := range createsByDomain {
|
||||
if len(endpoints) == 0 {
|
||||
log.WithFields(log.Fields{
|
||||
"domain": domain,
|
||||
}).Debug("Skipping domain, no creates found.")
|
||||
continue
|
||||
}
|
||||
|
||||
records := recordsByDomain[domain]
|
||||
|
||||
for _, ep := range endpoints {
|
||||
// Warn if there are existing records since we expect to create only new records.
|
||||
matchingRecords := getMatchingDomainRecords(records, domain, ep)
|
||||
if len(matchingRecords) > 0 {
|
||||
log.WithFields(log.Fields{
|
||||
"domain": domain,
|
||||
"dnsName": ep.DNSName,
|
||||
"recordType": ep.RecordType,
|
||||
}).Warn("Preexisting records exist which should not exist for creation actions.")
|
||||
}
|
||||
|
||||
ttl := getTTLFromEndpoint(ep)
|
||||
|
||||
for _, target := range ep.Targets {
|
||||
changes.Creates = append(changes.Creates, &digitalOceanChangeCreate{
|
||||
Domain: domain,
|
||||
Options: makeDomainEditRequest(domain, ep.DNSName, ep.RecordType, target, ttl),
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func processUpdateActions(
|
||||
recordsByDomain map[string][]godo.DomainRecord,
|
||||
updatesByDomain map[string][]*endpoint.Endpoint,
|
||||
changes *digitalOceanChanges,
|
||||
) error {
|
||||
// Generate creates and updates based on existing
|
||||
for domain, updates := range updatesByDomain {
|
||||
if len(updates) == 0 {
|
||||
log.WithFields(log.Fields{
|
||||
"domain": domain,
|
||||
}).Debug("Skipping Zone, no updates found.")
|
||||
continue
|
||||
}
|
||||
|
||||
records := recordsByDomain[domain]
|
||||
log.WithFields(log.Fields{
|
||||
"domain": domain,
|
||||
"records": records,
|
||||
}).Debug("Records for domain")
|
||||
|
||||
for _, ep := range updates {
|
||||
matchingRecords := getMatchingDomainRecords(records, domain, ep)
|
||||
|
||||
log.WithFields(log.Fields{
|
||||
"endpoint": ep,
|
||||
"matchingRecords": matchingRecords,
|
||||
}).Debug("matching records")
|
||||
|
||||
if len(matchingRecords) == 0 {
|
||||
log.WithFields(log.Fields{
|
||||
"domain": domain,
|
||||
"dnsName": ep.DNSName,
|
||||
"recordType": ep.RecordType,
|
||||
}).Warn("Planning an update but no existing records found.")
|
||||
}
|
||||
|
||||
matchingRecordsByTarget := map[string]godo.DomainRecord{}
|
||||
for _, r := range matchingRecords {
|
||||
matchingRecordsByTarget[r.Data] = r
|
||||
}
|
||||
|
||||
ttl := getTTLFromEndpoint(ep)
|
||||
|
||||
// Generate create and delete actions based on existence of a record for each target.
|
||||
for _, target := range ep.Targets {
|
||||
if record, ok := matchingRecordsByTarget[target]; ok {
|
||||
log.WithFields(log.Fields{
|
||||
"domain": domain,
|
||||
"dnsName": ep.DNSName,
|
||||
"recordType": ep.RecordType,
|
||||
"target": target,
|
||||
}).Warn("Updating existing target")
|
||||
|
||||
changes.Updates = append(changes.Updates, &digitalOceanChangeUpdate{
|
||||
Domain: domain,
|
||||
DomainRecord: record,
|
||||
Options: makeDomainEditRequest(domain, ep.DNSName, ep.RecordType, target, ttl),
|
||||
})
|
||||
|
||||
delete(matchingRecordsByTarget, target)
|
||||
} else {
|
||||
// Record did not previously exist, create new 'target'
|
||||
log.WithFields(log.Fields{
|
||||
"domain": domain,
|
||||
"dnsName": ep.DNSName,
|
||||
"recordType": ep.RecordType,
|
||||
"target": target,
|
||||
}).Warn("Creating new target")
|
||||
|
||||
changes.Creates = append(changes.Creates, &digitalOceanChangeCreate{
|
||||
Domain: domain,
|
||||
Options: makeDomainEditRequest(domain, ep.DNSName, ep.RecordType, target, ttl),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Any remaining records have been removed, delete them
|
||||
for _, record := range matchingRecordsByTarget {
|
||||
log.WithFields(log.Fields{
|
||||
"domain": domain,
|
||||
"dnsName": ep.DNSName,
|
||||
"recordType": ep.RecordType,
|
||||
"target": record.Data,
|
||||
}).Warn("Deleting target")
|
||||
|
||||
changes.Deletes = append(changes.Deletes, &digitalOceanChangeDelete{
|
||||
Domain: domain,
|
||||
RecordID: record.ID,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func processDeleteActions(
|
||||
recordsByDomain map[string][]godo.DomainRecord,
|
||||
deletesByDomain map[string][]*endpoint.Endpoint,
|
||||
changes *digitalOceanChanges,
|
||||
) error {
|
||||
// Generate delete actions for each deleted endpoint.
|
||||
for domain, deletes := range deletesByDomain {
|
||||
if len(deletes) == 0 {
|
||||
log.WithFields(log.Fields{
|
||||
"domain": domain,
|
||||
}).Debug("Skipping Zone, no deletes found.")
|
||||
continue
|
||||
}
|
||||
|
||||
records := recordsByDomain[domain]
|
||||
|
||||
for _, ep := range deletes {
|
||||
matchingRecords := getMatchingDomainRecords(records, domain, ep)
|
||||
|
||||
if len(matchingRecords) == 0 {
|
||||
log.WithFields(log.Fields{
|
||||
"domain": domain,
|
||||
"dnsName": ep.DNSName,
|
||||
"recordType": ep.RecordType,
|
||||
}).Warn("Records to delete not found.")
|
||||
}
|
||||
|
||||
for _, record := range matchingRecords {
|
||||
doDelete := false
|
||||
for _, t := range ep.Targets {
|
||||
v1 := t
|
||||
v2 := record.Data
|
||||
if ep.RecordType == endpoint.RecordTypeCNAME {
|
||||
v1 = strings.TrimSuffix(t, ".")
|
||||
v2 = strings.TrimSuffix(t, ".")
|
||||
}
|
||||
if v1 == v2 {
|
||||
doDelete = true
|
||||
}
|
||||
}
|
||||
|
||||
if doDelete {
|
||||
changes.Deletes = append(changes.Deletes, &digitalOceanChangeDelete{
|
||||
Domain: domain,
|
||||
RecordID: record.ID,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ApplyChanges applies the given set of generic changes to the provider.
|
||||
func (p *DigitalOceanProvider) ApplyChanges(ctx context.Context, planChanges *plan.Changes) error {
|
||||
// TODO: This should only retrieve zones affected by the given `planChanges`.
|
||||
recordsByDomain, zoneNameIDMapper, err := p.getRecordsByDomain(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// separate into per-zone change sets to be passed to the API.
|
||||
changesByZone := digitalOceanChangesByZone(zones, changes)
|
||||
for zoneName, changes := range changesByZone {
|
||||
records, err := p.fetchRecords(ctx, zoneName)
|
||||
if err != nil {
|
||||
log.Errorf("Failed to list records in the zone: %s", zoneName)
|
||||
continue
|
||||
}
|
||||
for _, change := range changes {
|
||||
logFields := log.Fields{
|
||||
"record": change.ResourceRecordSet.Name,
|
||||
"type": change.ResourceRecordSet.Type,
|
||||
"ttl": change.ResourceRecordSet.TTL,
|
||||
"action": change.Action,
|
||||
"zone": zoneName,
|
||||
}
|
||||
createsByDomain := endpointsByZone(zoneNameIDMapper, planChanges.Create)
|
||||
updatesByDomain := endpointsByZone(zoneNameIDMapper, planChanges.UpdateNew)
|
||||
deletesByDomain := endpointsByZone(zoneNameIDMapper, planChanges.Delete)
|
||||
|
||||
log.WithFields(logFields).Info("Changing record.")
|
||||
var changes digitalOceanChanges
|
||||
|
||||
if p.DryRun {
|
||||
continue
|
||||
}
|
||||
|
||||
change.ResourceRecordSet.Name = strings.TrimSuffix(change.ResourceRecordSet.Name, "."+zoneName)
|
||||
|
||||
// record at the root should be defined as @ instead of
|
||||
// the full domain name
|
||||
if change.ResourceRecordSet.Name == zoneName {
|
||||
change.ResourceRecordSet.Name = "@"
|
||||
}
|
||||
|
||||
// for some reason the DO API requires the '.' at the end of "data" in case of CNAME request
|
||||
// Example: {"type":"CNAME","name":"hello","data":"www.example.com."}
|
||||
if change.ResourceRecordSet.Type == endpoint.RecordTypeCNAME {
|
||||
change.ResourceRecordSet.Data += "."
|
||||
}
|
||||
|
||||
switch change.Action {
|
||||
case DigitalOceanCreate:
|
||||
_, _, err = p.Client.CreateRecord(ctx, zoneName,
|
||||
&godo.DomainRecordEditRequest{
|
||||
Data: change.ResourceRecordSet.Data,
|
||||
Name: change.ResourceRecordSet.Name,
|
||||
Type: change.ResourceRecordSet.Type,
|
||||
TTL: change.ResourceRecordSet.TTL,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
case DigitalOceanDelete:
|
||||
recordID := p.getRecordID(records, change.ResourceRecordSet)
|
||||
_, err = p.Client.DeleteRecord(ctx, zoneName, recordID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
case DigitalOceanUpdate:
|
||||
recordID := p.getRecordID(records, change.ResourceRecordSet)
|
||||
_, _, err = p.Client.EditRecord(ctx, zoneName, recordID,
|
||||
&godo.DomainRecordEditRequest{
|
||||
Data: change.ResourceRecordSet.Data,
|
||||
Name: change.ResourceRecordSet.Name,
|
||||
Type: change.ResourceRecordSet.Type,
|
||||
TTL: change.ResourceRecordSet.TTL,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
if err := processCreateActions(recordsByDomain, createsByDomain, &changes); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ApplyChanges applies a given set of changes in a given zone.
|
||||
func (p *DigitalOceanProvider) ApplyChanges(ctx context.Context, changes *plan.Changes) error {
|
||||
combinedChanges := make([]*DigitalOceanChange, 0, len(changes.Create)+len(changes.UpdateNew)+len(changes.Delete))
|
||||
|
||||
combinedChanges = append(combinedChanges, newDigitalOceanChanges(DigitalOceanCreate, changes.Create)...)
|
||||
combinedChanges = append(combinedChanges, newDigitalOceanChanges(DigitalOceanUpdate, changes.UpdateNew)...)
|
||||
combinedChanges = append(combinedChanges, newDigitalOceanChanges(DigitalOceanDelete, changes.Delete)...)
|
||||
|
||||
return p.submitChanges(ctx, combinedChanges)
|
||||
}
|
||||
|
||||
// newDigitalOceanChanges returns a collection of Changes based on the given records and action.
|
||||
func newDigitalOceanChanges(action string, endpoints []*endpoint.Endpoint) []*DigitalOceanChange {
|
||||
changes := make([]*DigitalOceanChange, 0, len(endpoints))
|
||||
|
||||
for _, endpoint := range endpoints {
|
||||
changes = append(changes, newDigitalOceanChange(action, endpoint))
|
||||
}
|
||||
|
||||
return changes
|
||||
}
|
||||
|
||||
func newDigitalOceanChange(action string, endpoint *endpoint.Endpoint) *DigitalOceanChange {
|
||||
// no annotation results in a TTL of 0, default to 300 for consistency with other providers
|
||||
var ttl = digitalOceanRecordTTL
|
||||
if endpoint.RecordTTL.IsConfigured() {
|
||||
ttl = int(endpoint.RecordTTL)
|
||||
}
|
||||
|
||||
change := &DigitalOceanChange{
|
||||
Action: action,
|
||||
ResourceRecordSet: godo.DomainRecord{
|
||||
Name: endpoint.DNSName,
|
||||
Type: endpoint.RecordType,
|
||||
Data: endpoint.Targets[0],
|
||||
TTL: ttl,
|
||||
},
|
||||
}
|
||||
return change
|
||||
}
|
||||
|
||||
// getRecordID returns the ID from a record.
|
||||
// the ID is mandatory to update and delete records
|
||||
func (p *DigitalOceanProvider) getRecordID(records []godo.DomainRecord, record godo.DomainRecord) int {
|
||||
for _, zoneRecord := range records {
|
||||
if zoneRecord.Name == record.Name && zoneRecord.Type == record.Type {
|
||||
return zoneRecord.ID
|
||||
}
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// digitalOceanchangesByZone separates a multi-zone change into a single change per zone.
|
||||
func digitalOceanChangesByZone(zones []godo.Domain, changeSet []*DigitalOceanChange) map[string][]*DigitalOceanChange {
|
||||
changes := make(map[string][]*DigitalOceanChange)
|
||||
zoneNameIDMapper := provider.ZoneIDName{}
|
||||
for _, z := range zones {
|
||||
zoneNameIDMapper.Add(z.Name, z.Name)
|
||||
changes[z.Name] = []*DigitalOceanChange{}
|
||||
}
|
||||
|
||||
for _, c := range changeSet {
|
||||
zone, _ := zoneNameIDMapper.FindZone(c.ResourceRecordSet.Name)
|
||||
if zone == "" {
|
||||
log.Debugf("Skipping record %s because no hosted zone matching record DNS Name was detected", c.ResourceRecordSet.Name)
|
||||
continue
|
||||
}
|
||||
changes[zone] = append(changes[zone], c)
|
||||
}
|
||||
|
||||
return changes
|
||||
|
||||
if err := processUpdateActions(recordsByDomain, updatesByDomain, &changes); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := processDeleteActions(recordsByDomain, deletesByDomain, &changes); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return p.submitChanges(ctx, &changes)
|
||||
}
|
||||
|
@ -20,9 +20,12 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"reflect"
|
||||
"sort"
|
||||
"testing"
|
||||
|
||||
"github.com/digitalocean/godo"
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
@ -143,10 +146,111 @@ func (m *mockDigitalOceanRecordsFail) Records(ctx context.Context, domain string
|
||||
return []godo.DomainRecord{}, nil, fmt.Errorf("Failed to get records")
|
||||
}
|
||||
|
||||
func TestNewDigitalOceanChanges(t *testing.T) {
|
||||
action := DigitalOceanCreate
|
||||
endpoints := []*endpoint.Endpoint{{DNSName: "new", Targets: endpoint.Targets{"target"}}}
|
||||
_ = newDigitalOceanChanges(action, endpoints)
|
||||
func isEmpty(xs interface{}) bool {
|
||||
if xs != nil {
|
||||
objValue := reflect.ValueOf(xs)
|
||||
return objValue.Len() == 0
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// This function is an adapted copy of the testify package's ElementsMatch function with the
|
||||
// call to ObjectsAreEqual replaced with cmp.Equal which better handles struct's with pointers to
|
||||
// other structs. It also ignores ordering when comparing unlike cmp.Equal.
|
||||
func elementsMatch(t *testing.T, listA, listB interface{}, msgAndArgs ...interface{}) (ok bool) {
|
||||
if listA == nil && listB == nil {
|
||||
return true
|
||||
} else if listA == nil {
|
||||
return isEmpty(listB)
|
||||
} else if listB == nil {
|
||||
return isEmpty(listA)
|
||||
}
|
||||
|
||||
aKind := reflect.TypeOf(listA).Kind()
|
||||
bKind := reflect.TypeOf(listB).Kind()
|
||||
|
||||
if aKind != reflect.Array && aKind != reflect.Slice {
|
||||
return assert.Fail(t, fmt.Sprintf("%q has an unsupported type %s", listA, aKind), msgAndArgs...)
|
||||
}
|
||||
|
||||
if bKind != reflect.Array && bKind != reflect.Slice {
|
||||
return assert.Fail(t, fmt.Sprintf("%q has an unsupported type %s", listB, bKind), msgAndArgs...)
|
||||
}
|
||||
|
||||
aValue := reflect.ValueOf(listA)
|
||||
bValue := reflect.ValueOf(listB)
|
||||
|
||||
aLen := aValue.Len()
|
||||
bLen := bValue.Len()
|
||||
|
||||
if aLen != bLen {
|
||||
return assert.Fail(t, fmt.Sprintf("lengths don't match: %d != %d", aLen, bLen), msgAndArgs...)
|
||||
}
|
||||
|
||||
// Mark indexes in bValue that we already used
|
||||
visited := make([]bool, bLen)
|
||||
for i := 0; i < aLen; i++ {
|
||||
element := aValue.Index(i).Interface()
|
||||
found := false
|
||||
for j := 0; j < bLen; j++ {
|
||||
if visited[j] {
|
||||
continue
|
||||
}
|
||||
if cmp.Equal(bValue.Index(j).Interface(), element) {
|
||||
visited[j] = true
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
return assert.Fail(t, fmt.Sprintf("element %s appears more times in %s than in %s", element, aValue, bValue), msgAndArgs...)
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// Test adapted from test in testify library.
|
||||
// https://github.com/stretchr/testify/blob/b8f7d52a4a7c581d5ed42333572e7fb857c687c2/assert/assertions_test.go#L768-L796
|
||||
func TestElementsMatch(t *testing.T) {
|
||||
mockT := new(testing.T)
|
||||
|
||||
cases := []struct {
|
||||
expected interface{}
|
||||
actual interface{}
|
||||
result bool
|
||||
}{
|
||||
// matching
|
||||
{nil, nil, true},
|
||||
|
||||
{nil, nil, true},
|
||||
{[]int{}, []int{}, true},
|
||||
{[]int{1}, []int{1}, true},
|
||||
{[]int{1, 1}, []int{1, 1}, true},
|
||||
{[]int{1, 2}, []int{1, 2}, true},
|
||||
{[]int{1, 2}, []int{2, 1}, true},
|
||||
{[2]int{1, 2}, [2]int{2, 1}, true},
|
||||
{[]string{"hello", "world"}, []string{"world", "hello"}, true},
|
||||
{[]string{"hello", "hello"}, []string{"hello", "hello"}, true},
|
||||
{[]string{"hello", "hello", "world"}, []string{"hello", "world", "hello"}, true},
|
||||
{[3]string{"hello", "hello", "world"}, [3]string{"hello", "world", "hello"}, true},
|
||||
{[]int{}, nil, true},
|
||||
|
||||
// not matching
|
||||
{[]int{1}, []int{1, 1}, false},
|
||||
{[]int{1, 2}, []int{2, 2}, false},
|
||||
{[]string{"hello", "hello"}, []string{"hello"}, false},
|
||||
}
|
||||
|
||||
for _, c := range cases {
|
||||
t.Run(fmt.Sprintf("ElementsMatch(%#v, %#v)", c.expected, c.actual), func(t *testing.T) {
|
||||
res := elementsMatch(mockT, c.actual, c.expected)
|
||||
|
||||
if res != c.result {
|
||||
t.Errorf("elementsMatch(%#v, %#v) should return %v", c.actual, c.expected, c.result)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestDigitalOceanZones(t *testing.T) {
|
||||
@ -165,6 +269,38 @@ func TestDigitalOceanZones(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestDigitalOceanMakeDomainEditRequest(t *testing.T) {
|
||||
// Ensure that records at the root of the zone get `@` as the name.
|
||||
r1 := makeDomainEditRequest("example.com", "example.com", endpoint.RecordTypeA,
|
||||
"1.2.3.4", digitalOceanRecordTTL)
|
||||
assert.Equal(t, &godo.DomainRecordEditRequest{
|
||||
Type: endpoint.RecordTypeA,
|
||||
Name: "@",
|
||||
Data: "1.2.3.4",
|
||||
TTL: digitalOceanRecordTTL,
|
||||
}, r1)
|
||||
|
||||
// Ensure the CNAME records have a `.` appended.
|
||||
r2 := makeDomainEditRequest("example.com", "foo.example.com", endpoint.RecordTypeCNAME,
|
||||
"bar.example.com", digitalOceanRecordTTL)
|
||||
assert.Equal(t, &godo.DomainRecordEditRequest{
|
||||
Type: endpoint.RecordTypeCNAME,
|
||||
Name: "foo",
|
||||
Data: "bar.example.com.",
|
||||
TTL: digitalOceanRecordTTL,
|
||||
}, r2)
|
||||
|
||||
// Ensure that CNAME records do not have an extra `.` appended if they already have a `.`
|
||||
r3 := makeDomainEditRequest("example.com", "foo.example.com", endpoint.RecordTypeCNAME,
|
||||
"bar.example.com.", digitalOceanRecordTTL)
|
||||
assert.Equal(t, &godo.DomainRecordEditRequest{
|
||||
Type: endpoint.RecordTypeCNAME,
|
||||
Name: "foo",
|
||||
Data: "bar.example.com.",
|
||||
TTL: digitalOceanRecordTTL,
|
||||
}, r3)
|
||||
}
|
||||
|
||||
func TestDigitalOceanApplyChanges(t *testing.T) {
|
||||
changes := &plan.Changes{}
|
||||
provider := &DigitalOceanProvider{
|
||||
@ -185,42 +321,267 @@ func TestDigitalOceanApplyChanges(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestDigitalOceanProcessCreateActions(t *testing.T) {
|
||||
recordsByDomain := map[string][]godo.DomainRecord{
|
||||
"example.com": nil,
|
||||
}
|
||||
|
||||
createsByDomain := map[string][]*endpoint.Endpoint{
|
||||
"example.com": {
|
||||
endpoint.NewEndpoint("foo.example.com", endpoint.RecordTypeA, "1.2.3.4"),
|
||||
endpoint.NewEndpoint("example.com", endpoint.RecordTypeCNAME, "foo.example.com"),
|
||||
},
|
||||
}
|
||||
|
||||
var changes digitalOceanChanges
|
||||
err := processCreateActions(recordsByDomain, createsByDomain, &changes)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, 2, len(changes.Creates))
|
||||
assert.Equal(t, 0, len(changes.Updates))
|
||||
assert.Equal(t, 0, len(changes.Deletes))
|
||||
|
||||
expectedCreates := []*digitalOceanChangeCreate{
|
||||
{
|
||||
Domain: "example.com",
|
||||
Options: &godo.DomainRecordEditRequest{
|
||||
Name: "foo",
|
||||
Type: endpoint.RecordTypeA,
|
||||
Data: "1.2.3.4",
|
||||
TTL: digitalOceanRecordTTL,
|
||||
},
|
||||
},
|
||||
{
|
||||
Domain: "example.com",
|
||||
Options: &godo.DomainRecordEditRequest{
|
||||
Name: "@",
|
||||
Type: endpoint.RecordTypeCNAME,
|
||||
Data: "foo.example.com.",
|
||||
TTL: digitalOceanRecordTTL,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
if !elementsMatch(t, expectedCreates, changes.Creates) {
|
||||
assert.Failf(t, "diff: %s", cmp.Diff(expectedCreates, changes.Creates))
|
||||
}
|
||||
}
|
||||
|
||||
func TestDigitalOceanProcessUpdateActions(t *testing.T) {
|
||||
recordsByDomain := map[string][]godo.DomainRecord{
|
||||
"example.com": {
|
||||
{
|
||||
ID: 1,
|
||||
Name: "foo",
|
||||
Type: endpoint.RecordTypeA,
|
||||
Data: "1.2.3.4",
|
||||
TTL: digitalOceanRecordTTL,
|
||||
},
|
||||
{
|
||||
ID: 2,
|
||||
Name: "foo",
|
||||
Type: endpoint.RecordTypeA,
|
||||
Data: "5.6.7.8",
|
||||
TTL: digitalOceanRecordTTL,
|
||||
},
|
||||
{
|
||||
ID: 3,
|
||||
Name: "@",
|
||||
Type: endpoint.RecordTypeCNAME,
|
||||
Data: "foo.example.com.",
|
||||
TTL: digitalOceanRecordTTL,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
updatesByDomain := map[string][]*endpoint.Endpoint{
|
||||
"example.com": {
|
||||
endpoint.NewEndpoint("foo.example.com", endpoint.RecordTypeA, "10.11.12.13"),
|
||||
endpoint.NewEndpoint("example.com", endpoint.RecordTypeCNAME, "bar.example.com"),
|
||||
},
|
||||
}
|
||||
|
||||
var changes digitalOceanChanges
|
||||
err := processUpdateActions(recordsByDomain, updatesByDomain, &changes)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, 2, len(changes.Creates))
|
||||
assert.Equal(t, 0, len(changes.Updates))
|
||||
assert.Equal(t, 3, len(changes.Deletes))
|
||||
|
||||
expectedCreates := []*digitalOceanChangeCreate{
|
||||
{
|
||||
Domain: "example.com",
|
||||
Options: &godo.DomainRecordEditRequest{
|
||||
Name: "foo",
|
||||
Type: endpoint.RecordTypeA,
|
||||
Data: "10.11.12.13",
|
||||
TTL: digitalOceanRecordTTL,
|
||||
},
|
||||
},
|
||||
{
|
||||
Domain: "example.com",
|
||||
Options: &godo.DomainRecordEditRequest{
|
||||
Name: "@",
|
||||
Type: endpoint.RecordTypeCNAME,
|
||||
Data: "bar.example.com.",
|
||||
TTL: digitalOceanRecordTTL,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
if !elementsMatch(t, expectedCreates, changes.Creates) {
|
||||
assert.Failf(t, "diff: %s", cmp.Diff(expectedCreates, changes.Creates))
|
||||
}
|
||||
|
||||
expectedDeletes := []*digitalOceanChangeDelete{
|
||||
{
|
||||
Domain: "example.com",
|
||||
RecordID: 1,
|
||||
},
|
||||
{
|
||||
Domain: "example.com",
|
||||
RecordID: 2,
|
||||
},
|
||||
{
|
||||
Domain: "example.com",
|
||||
RecordID: 3,
|
||||
},
|
||||
}
|
||||
|
||||
if !elementsMatch(t, expectedDeletes, changes.Deletes) {
|
||||
assert.Failf(t, "diff: %s", cmp.Diff(expectedDeletes, changes.Deletes))
|
||||
}
|
||||
}
|
||||
|
||||
func TestDigitalOceanProcessDeleteActions(t *testing.T) {
|
||||
recordsByDomain := map[string][]godo.DomainRecord{
|
||||
"example.com": {
|
||||
{
|
||||
ID: 1,
|
||||
Name: "foo",
|
||||
Type: endpoint.RecordTypeA,
|
||||
Data: "1.2.3.4",
|
||||
TTL: digitalOceanRecordTTL,
|
||||
},
|
||||
// This record will not be deleted because it represents a target not specified to be deleted.
|
||||
{
|
||||
ID: 2,
|
||||
Name: "foo",
|
||||
Type: endpoint.RecordTypeA,
|
||||
Data: "5.6.7.8",
|
||||
TTL: digitalOceanRecordTTL,
|
||||
},
|
||||
{
|
||||
ID: 3,
|
||||
Name: "@",
|
||||
Type: endpoint.RecordTypeCNAME,
|
||||
Data: "foo.example.com.",
|
||||
TTL: digitalOceanRecordTTL,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
deletesByDomain := map[string][]*endpoint.Endpoint{
|
||||
"example.com": {
|
||||
endpoint.NewEndpoint("foo.example.com", endpoint.RecordTypeA, "1.2.3.4"),
|
||||
endpoint.NewEndpoint("example.com", endpoint.RecordTypeCNAME, "foo.example.com"),
|
||||
},
|
||||
}
|
||||
|
||||
var changes digitalOceanChanges
|
||||
err := processDeleteActions(recordsByDomain, deletesByDomain, &changes)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, 0, len(changes.Creates))
|
||||
assert.Equal(t, 0, len(changes.Updates))
|
||||
assert.Equal(t, 2, len(changes.Deletes))
|
||||
|
||||
expectedDeletes := []*digitalOceanChangeDelete{
|
||||
{
|
||||
Domain: "example.com",
|
||||
RecordID: 1,
|
||||
},
|
||||
{
|
||||
Domain: "example.com",
|
||||
RecordID: 3,
|
||||
},
|
||||
}
|
||||
|
||||
if !elementsMatch(t, expectedDeletes, changes.Deletes) {
|
||||
assert.Failf(t, "diff: %s", cmp.Diff(expectedDeletes, changes.Deletes))
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewDigitalOceanProvider(t *testing.T) {
|
||||
_ = os.Setenv("DO_TOKEN", "xxxxxxxxxxxxxxxxx")
|
||||
_, err := NewDigitalOceanProvider(context.Background(), endpoint.NewDomainFilter([]string{"ext-dns-test.zalando.to."}), true)
|
||||
_, err := NewDigitalOceanProvider(context.Background(), endpoint.NewDomainFilter([]string{"ext-dns-test.zalando.to."}), true, 50)
|
||||
if err != nil {
|
||||
t.Errorf("should not fail, %s", err)
|
||||
}
|
||||
_ = os.Unsetenv("DO_TOKEN")
|
||||
_, err = NewDigitalOceanProvider(context.Background(), endpoint.NewDomainFilter([]string{"ext-dns-test.zalando.to."}), true)
|
||||
_, err = NewDigitalOceanProvider(context.Background(), endpoint.NewDomainFilter([]string{"ext-dns-test.zalando.to."}), true, 50)
|
||||
if err == nil {
|
||||
t.Errorf("expected to fail")
|
||||
}
|
||||
}
|
||||
|
||||
func TestDigitalOceanGetRecordID(t *testing.T) {
|
||||
p := &DigitalOceanProvider{}
|
||||
func TestDigitalOceanGetMatchingDomainRecords(t *testing.T) {
|
||||
records := []godo.DomainRecord{
|
||||
{
|
||||
ID: 1,
|
||||
Name: "foo.com",
|
||||
Name: "foo",
|
||||
Type: endpoint.RecordTypeCNAME,
|
||||
Data: "baz.org.",
|
||||
},
|
||||
{
|
||||
ID: 2,
|
||||
Name: "baz.de",
|
||||
Name: "baz",
|
||||
Type: endpoint.RecordTypeA,
|
||||
Data: "1.2.3.4",
|
||||
},
|
||||
{
|
||||
ID: 3,
|
||||
Name: "baz",
|
||||
Type: endpoint.RecordTypeA,
|
||||
Data: "5.6.7.8",
|
||||
},
|
||||
{
|
||||
ID: 4,
|
||||
Name: "@",
|
||||
Type: endpoint.RecordTypeA,
|
||||
Data: "9.10.11.12",
|
||||
},
|
||||
}
|
||||
assert.Equal(t, 1, p.getRecordID(records, godo.DomainRecord{
|
||||
Name: "foo.com",
|
||||
Type: endpoint.RecordTypeCNAME,
|
||||
}))
|
||||
|
||||
assert.Equal(t, 0, p.getRecordID(records, godo.DomainRecord{
|
||||
Name: "foo.com",
|
||||
Type: endpoint.RecordTypeA,
|
||||
}))
|
||||
ep1 := endpoint.NewEndpoint("foo.com", endpoint.RecordTypeCNAME)
|
||||
assert.Equal(t, 1, len(getMatchingDomainRecords(records, "com", ep1)))
|
||||
|
||||
ep2 := endpoint.NewEndpoint("foo.com", endpoint.RecordTypeA)
|
||||
assert.Equal(t, 0, len(getMatchingDomainRecords(records, "com", ep2)))
|
||||
|
||||
ep3 := endpoint.NewEndpoint("baz.org", endpoint.RecordTypeA)
|
||||
r := getMatchingDomainRecords(records, "org", ep3)
|
||||
assert.Equal(t, 2, len(r))
|
||||
assert.ElementsMatch(t, r, []godo.DomainRecord{
|
||||
{
|
||||
ID: 2,
|
||||
Name: "baz",
|
||||
Type: endpoint.RecordTypeA,
|
||||
Data: "1.2.3.4",
|
||||
},
|
||||
{
|
||||
ID: 3,
|
||||
Name: "baz",
|
||||
Type: endpoint.RecordTypeA,
|
||||
Data: "5.6.7.8",
|
||||
},
|
||||
})
|
||||
|
||||
ep4 := endpoint.NewEndpoint("example.com", endpoint.RecordTypeA)
|
||||
r2 := getMatchingDomainRecords(records, "example.com", ep4)
|
||||
assert.Equal(t, 1, len(r2))
|
||||
assert.Equal(t, "9.10.11.12", r2[0].Data)
|
||||
}
|
||||
|
||||
func validateDigitalOceanZones(t *testing.T, zones []godo.Domain, expected []godo.Domain) {
|
||||
@ -265,3 +626,36 @@ func TestDigitalOceanAllRecords(t *testing.T) {
|
||||
t.Errorf("expected to fail, %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDigitalOceanMergeRecordsByNameType(t *testing.T) {
|
||||
xs := []*endpoint.Endpoint{
|
||||
endpoint.NewEndpoint("foo.example.com", "A", "1.2.3.4"),
|
||||
endpoint.NewEndpoint("bar.example.com", "A", "1.2.3.4"),
|
||||
endpoint.NewEndpoint("foo.example.com", "A", "5.6.7.8"),
|
||||
endpoint.NewEndpoint("foo.example.com", "CNAME", "somewhere.out.there.com"),
|
||||
}
|
||||
|
||||
merged := mergeEndpointsByNameType(xs)
|
||||
|
||||
assert.Equal(t, 3, len(merged))
|
||||
sort.SliceStable(merged, func(i, j int) bool {
|
||||
if merged[i].DNSName != merged[j].DNSName {
|
||||
return merged[i].DNSName < merged[j].DNSName
|
||||
}
|
||||
return merged[i].RecordType < merged[j].RecordType
|
||||
})
|
||||
assert.Equal(t, "bar.example.com", merged[0].DNSName)
|
||||
assert.Equal(t, "A", merged[0].RecordType)
|
||||
assert.Equal(t, 1, len(merged[0].Targets))
|
||||
assert.Equal(t, "1.2.3.4", merged[0].Targets[0])
|
||||
|
||||
assert.Equal(t, "foo.example.com", merged[1].DNSName)
|
||||
assert.Equal(t, "A", merged[1].RecordType)
|
||||
assert.Equal(t, 2, len(merged[1].Targets))
|
||||
assert.ElementsMatch(t, []string{"1.2.3.4", "5.6.7.8"}, merged[1].Targets)
|
||||
|
||||
assert.Equal(t, "foo.example.com", merged[2].DNSName)
|
||||
assert.Equal(t, "CNAME", merged[2].RecordType)
|
||||
assert.Equal(t, 1, len(merged[2].Targets))
|
||||
assert.Equal(t, "somewhere.out.there.com", merged[2].Targets[0])
|
||||
}
|
||||
|
@ -77,6 +77,7 @@ func (z dnsimpleZoneService) UpdateRecord(ctx context.Context, accountID string,
|
||||
}
|
||||
|
||||
type dnsimpleProvider struct {
|
||||
provider.BaseProvider
|
||||
client dnsimpleZoneServiceInterface
|
||||
identity dnsimpleIdentityService
|
||||
accountID string
|
||||
@ -100,7 +101,7 @@ const (
|
||||
func NewDnsimpleProvider(domainFilter endpoint.DomainFilter, zoneIDFilter provider.ZoneIDFilter, dryRun bool) (provider.Provider, error) {
|
||||
oauthToken := os.Getenv("DNSIMPLE_OAUTH")
|
||||
if len(oauthToken) == 0 {
|
||||
return nil, fmt.Errorf("No dnsimple oauth token provided")
|
||||
return nil, fmt.Errorf("no dnsimple oauth token provided")
|
||||
}
|
||||
|
||||
ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: oauthToken})
|
||||
@ -318,7 +319,7 @@ func (p *dnsimpleProvider) GetRecordID(ctx context.Context, zone string, recordN
|
||||
break
|
||||
}
|
||||
}
|
||||
return 0, fmt.Errorf("No record id found")
|
||||
return 0, fmt.Errorf("no record id found")
|
||||
}
|
||||
|
||||
// dnsimpleSuitableZone returns the most suitable zone for a given hostname and a set of zones.
|
||||
|
@ -104,6 +104,7 @@ func (snap *ZoneSnapshot) StoreRecordsForSerial(zone string, serial int, records
|
||||
|
||||
// DynProvider is the actual interface impl.
|
||||
type dynProviderState struct {
|
||||
provider.BaseProvider
|
||||
DynConfig
|
||||
LastLoginErrorTime int64
|
||||
|
||||
@ -157,7 +158,6 @@ func NewDynProvider(config DynConfig) (provider.Provider, error) {
|
||||
func filterAndFixLinks(links []string, filter endpoint.DomainFilter) []string {
|
||||
var result []string
|
||||
for _, link := range links {
|
||||
|
||||
// link looks like /REST/CNAMERecord/acme.com/exchange.acme.com/349386875
|
||||
|
||||
// strip /REST/
|
||||
@ -291,7 +291,6 @@ func (d *dynProviderState) allRecordsToEndpoints(records *dynectsoap.GetAllRecor
|
||||
}
|
||||
|
||||
return result
|
||||
|
||||
}
|
||||
|
||||
func errorOrValue(err error, value interface{}) interface{} {
|
||||
@ -393,7 +392,6 @@ func (d *dynProviderState) fetchAllRecordsInZone(zone string) (*dynectsoap.GetAl
|
||||
}
|
||||
|
||||
return &records, nil
|
||||
|
||||
}
|
||||
|
||||
// buildLinkToRecord build a resource link. The symmetry of the dyn API is used to save
|
||||
@ -569,7 +567,7 @@ func (d *dynProviderState) commit(client *dynect.Client) error {
|
||||
case 1:
|
||||
return errs[0]
|
||||
default:
|
||||
return fmt.Errorf("Multiple errors committing: %+v", errs)
|
||||
return fmt.Errorf("multiple errors committing: %+v", errs)
|
||||
}
|
||||
}
|
||||
|
||||
@ -680,7 +678,7 @@ func (d *dynProviderState) ApplyChanges(ctx context.Context, changes *plan.Chang
|
||||
case 1:
|
||||
return errs[0]
|
||||
default:
|
||||
return fmt.Errorf("Multiple errors committing: %+v", errs)
|
||||
return fmt.Errorf("multiple errors committing: %+v", errs)
|
||||
}
|
||||
|
||||
if needsCommit {
|
||||
|
@ -25,6 +25,7 @@ import (
|
||||
|
||||
"sigs.k8s.io/external-dns/endpoint"
|
||||
"sigs.k8s.io/external-dns/plan"
|
||||
"sigs.k8s.io/external-dns/provider"
|
||||
)
|
||||
|
||||
// EgoscaleClientI for replaceable implementation
|
||||
@ -38,6 +39,7 @@ type EgoscaleClientI interface {
|
||||
|
||||
// ExoscaleProvider initialized as dns provider with no records
|
||||
type ExoscaleProvider struct {
|
||||
provider.BaseProvider
|
||||
domain endpoint.DomainFilter
|
||||
client EgoscaleClientI
|
||||
filter *zoneFilter
|
||||
|
@ -99,6 +99,7 @@ func (c changesService) Create(project string, managedZone string, change *dns.C
|
||||
|
||||
// GoogleProvider is an implementation of Provider for Google CloudDNS.
|
||||
type GoogleProvider struct {
|
||||
provider.BaseProvider
|
||||
// The Google project to work in
|
||||
project string
|
||||
// Enabled dry-run will print any modifying actions rather than execute them.
|
||||
|
214
provider/hetzner/hetzner.go
Normal file
214
provider/hetzner/hetzner.go
Normal file
@ -0,0 +1,214 @@
|
||||
/*
|
||||
Copyright 2020 The Kubernetes Authors.
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package hetzner
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
hclouddns "git.blindage.org/21h/hcloud-dns"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"sigs.k8s.io/external-dns/endpoint"
|
||||
"sigs.k8s.io/external-dns/plan"
|
||||
"sigs.k8s.io/external-dns/provider"
|
||||
)
|
||||
|
||||
const (
|
||||
hetznerCreate = "CREATE"
|
||||
hetznerDelete = "DELETE"
|
||||
hetznerUpdate = "UPDATE"
|
||||
hetznerTTL = 600
|
||||
)
|
||||
|
||||
type HetznerChanges struct {
|
||||
Action string
|
||||
ZoneID string
|
||||
ZoneName string
|
||||
ResourceRecordSet hclouddns.HCloudRecord
|
||||
}
|
||||
|
||||
type HetznerProvider struct {
|
||||
provider.BaseProvider
|
||||
Client hclouddns.HCloudClientAdapter
|
||||
domainFilter endpoint.DomainFilter
|
||||
DryRun bool
|
||||
}
|
||||
|
||||
func NewHetznerProvider(ctx context.Context, domainFilter endpoint.DomainFilter, dryRun bool) (*HetznerProvider, error) {
|
||||
token, ok := os.LookupEnv("HETZNER_TOKEN")
|
||||
if !ok {
|
||||
return nil, errors.New("no environment variable HETZNER_TOKEN provided")
|
||||
}
|
||||
|
||||
client := hclouddns.New(token)
|
||||
|
||||
provider := &HetznerProvider{
|
||||
Client: client,
|
||||
domainFilter: domainFilter,
|
||||
DryRun: dryRun,
|
||||
}
|
||||
return provider, nil
|
||||
}
|
||||
|
||||
func (p *HetznerProvider) Records(ctx context.Context) ([]*endpoint.Endpoint, error) {
|
||||
zones, err := p.Client.GetZones(hclouddns.HCloudGetZonesParams{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
endpoints := []*endpoint.Endpoint{}
|
||||
for _, zone := range zones.Zones {
|
||||
records, err := p.Client.GetRecords(hclouddns.HCloudGetRecordsParams{ZoneID: zone.ID})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, r := range records.Records {
|
||||
if provider.SupportedRecordType(string(r.RecordType)) {
|
||||
name := r.Name + "." + zone.Name
|
||||
|
||||
if r.Name == "@" {
|
||||
name = zone.Name
|
||||
}
|
||||
|
||||
endpoints = append(endpoints, endpoint.NewEndpoint(name, string(r.RecordType), r.Value))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return endpoints, nil
|
||||
}
|
||||
|
||||
func (p *HetznerProvider) ApplyChanges(ctx context.Context, changes *plan.Changes) error {
|
||||
combinedChanges := make([]*HetznerChanges, 0, len(changes.Create)+len(changes.UpdateNew)+len(changes.Delete))
|
||||
|
||||
combinedChanges = append(combinedChanges, p.newHetznerChanges(hetznerCreate, changes.Create)...)
|
||||
combinedChanges = append(combinedChanges, p.newHetznerChanges(hetznerUpdate, changes.UpdateNew)...)
|
||||
combinedChanges = append(combinedChanges, p.newHetznerChanges(hetznerDelete, changes.Delete)...)
|
||||
|
||||
return p.submitChanges(ctx, combinedChanges)
|
||||
}
|
||||
|
||||
func (p *HetznerProvider) submitChanges(ctx context.Context, changes []*HetznerChanges) error {
|
||||
if len(changes) == 0 {
|
||||
log.Infof("All records are already up to date")
|
||||
return nil
|
||||
}
|
||||
|
||||
zones, err := p.Client.GetZones(hclouddns.HCloudGetZonesParams{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
zoneChanges := p.seperateChangesByZone(zones.Zones, changes)
|
||||
|
||||
for _, changes := range zoneChanges {
|
||||
for _, change := range changes {
|
||||
log.WithFields(log.Fields{
|
||||
"record": change.ResourceRecordSet.Name,
|
||||
"type": change.ResourceRecordSet.RecordType,
|
||||
"ttl": change.ResourceRecordSet.TTL,
|
||||
"action": change.Action,
|
||||
"zone": change.ZoneName,
|
||||
"zone_id": change.ZoneID,
|
||||
}).Info("Changing record.")
|
||||
|
||||
change.ResourceRecordSet.Name = strings.TrimSuffix(change.ResourceRecordSet.Name, "."+change.ZoneName)
|
||||
if change.ResourceRecordSet.Name == change.ZoneName {
|
||||
change.ResourceRecordSet.Name = "@"
|
||||
}
|
||||
if change.ResourceRecordSet.RecordType == endpoint.RecordTypeCNAME {
|
||||
change.ResourceRecordSet.Value += "."
|
||||
}
|
||||
|
||||
switch change.Action {
|
||||
case hetznerCreate:
|
||||
record := hclouddns.HCloudRecord{
|
||||
RecordType: change.ResourceRecordSet.RecordType,
|
||||
ZoneID: change.ZoneID,
|
||||
Name: change.ResourceRecordSet.Name,
|
||||
Value: change.ResourceRecordSet.Value,
|
||||
TTL: change.ResourceRecordSet.TTL,
|
||||
}
|
||||
_, err := p.Client.CreateRecord(record)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
case hetznerDelete:
|
||||
_, err := p.Client.DeleteRecord(change.ResourceRecordSet.ID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
case hetznerUpdate:
|
||||
record := hclouddns.HCloudRecord{
|
||||
RecordType: change.ResourceRecordSet.RecordType,
|
||||
ZoneID: change.ZoneID,
|
||||
Name: change.ResourceRecordSet.Name,
|
||||
Value: change.ResourceRecordSet.Value,
|
||||
TTL: change.ResourceRecordSet.TTL,
|
||||
}
|
||||
_, err := p.Client.UpdateRecord(record)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *HetznerProvider) newHetznerChanges(action string, endpoints []*endpoint.Endpoint) []*HetznerChanges {
|
||||
changes := make([]*HetznerChanges, 0, len(endpoints))
|
||||
ttl := hetznerTTL
|
||||
for _, e := range endpoints {
|
||||
if e.RecordTTL.IsConfigured() {
|
||||
ttl = int(e.RecordTTL)
|
||||
}
|
||||
change := &HetznerChanges{
|
||||
Action: action,
|
||||
ResourceRecordSet: hclouddns.HCloudRecord{
|
||||
RecordType: hclouddns.RecordType(e.RecordType),
|
||||
Name: e.DNSName,
|
||||
Value: e.Targets[0],
|
||||
TTL: ttl,
|
||||
},
|
||||
}
|
||||
changes = append(changes, change)
|
||||
}
|
||||
return changes
|
||||
}
|
||||
|
||||
func (p *HetznerProvider) seperateChangesByZone(zones []hclouddns.HCloudZone, changes []*HetznerChanges) map[string][]*HetznerChanges {
|
||||
change := make(map[string][]*HetznerChanges)
|
||||
zoneNameID := provider.ZoneIDName{}
|
||||
|
||||
for _, z := range zones {
|
||||
zoneNameID.Add(z.ID, z.Name)
|
||||
change[z.ID] = []*HetznerChanges{}
|
||||
}
|
||||
|
||||
for _, c := range changes {
|
||||
zoneID, zoneName := zoneNameID.FindZone(c.ResourceRecordSet.Name)
|
||||
if zoneName == "" {
|
||||
log.Debugf("Skipping record %s because no hosted zone matching record DNS Name was detected", c.ResourceRecordSet.Name)
|
||||
continue
|
||||
}
|
||||
c.ZoneName = zoneName
|
||||
c.ZoneID = zoneID
|
||||
change[zoneID] = append(change[zoneID], c)
|
||||
}
|
||||
return change
|
||||
}
|
227
provider/hetzner/hetzner_test.go
Normal file
227
provider/hetzner/hetzner_test.go
Normal file
@ -0,0 +1,227 @@
|
||||
/*
|
||||
Copyright 2020 The Kubernetes Authors.
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package hetzner
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
hclouddns "git.blindage.org/21h/hcloud-dns"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"sigs.k8s.io/external-dns/endpoint"
|
||||
"sigs.k8s.io/external-dns/plan"
|
||||
)
|
||||
|
||||
type mockHCloudClientAdapter interface {
|
||||
GetZone(ID string) (hclouddns.HCloudAnswerGetZone, error)
|
||||
GetZones(params hclouddns.HCloudGetZonesParams) (hclouddns.HCloudAnswerGetZones, error)
|
||||
UpdateZone(zone hclouddns.HCloudZone) (hclouddns.HCloudAnswerGetZone, error)
|
||||
DeleteZone(ID string) (hclouddns.HCloudAnswerDeleteZone, error)
|
||||
CreateZone(zone hclouddns.HCloudZone) (hclouddns.HCloudAnswerGetZone, error)
|
||||
ImportZoneString(zoneID string, zonePlainText string) (hclouddns.HCloudAnswerGetZone, error)
|
||||
ExportZoneToString(zoneID string) (hclouddns.HCloudAnswerGetZonePlainText, error)
|
||||
ValidateZoneString(zonePlainText string) (hclouddns.HCloudAnswerZoneValidate, error)
|
||||
GetRecords(params hclouddns.HCloudGetRecordsParams) (hclouddns.HCloudAnswerGetRecords, error)
|
||||
UpdateRecord(record hclouddns.HCloudRecord) (hclouddns.HCloudAnswerGetRecord, error)
|
||||
DeleteRecord(ID string) (hclouddns.HCloudAnswerDeleteRecord, error)
|
||||
CreateRecord(record hclouddns.HCloudRecord) (hclouddns.HCloudAnswerGetRecord, error)
|
||||
CreateRecordBulk(record []hclouddns.HCloudRecord) (hclouddns.HCloudAnswerCreateRecords, error)
|
||||
UpdateRecordBulk(record []hclouddns.HCloudRecord) (hclouddns.HCloudAnswerUpdateRecords, error)
|
||||
}
|
||||
|
||||
type mockHCloudClient struct {
|
||||
Token string `yaml:"token"`
|
||||
}
|
||||
|
||||
// New instance
|
||||
func mockHCloudNew(t string) mockHCloudClientAdapter {
|
||||
return &mockHCloudClient{
|
||||
Token: t,
|
||||
}
|
||||
}
|
||||
|
||||
// Mock all methods
|
||||
|
||||
func (m *mockHCloudClient) GetZone(ID string) (hclouddns.HCloudAnswerGetZone, error) {
|
||||
return hclouddns.HCloudAnswerGetZone{}, nil
|
||||
}
|
||||
|
||||
func (m *mockHCloudClient) GetZones(params hclouddns.HCloudGetZonesParams) (hclouddns.HCloudAnswerGetZones, error) {
|
||||
return hclouddns.HCloudAnswerGetZones{
|
||||
Zones: []hclouddns.HCloudZone{
|
||||
{
|
||||
ID: "HetznerZoneID",
|
||||
Name: "blindage.org",
|
||||
TTL: 666,
|
||||
RecordsCount: 1,
|
||||
},
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// zones
|
||||
func (m *mockHCloudClient) UpdateZone(zone hclouddns.HCloudZone) (hclouddns.HCloudAnswerGetZone, error) {
|
||||
return hclouddns.HCloudAnswerGetZone{}, nil
|
||||
}
|
||||
func (m *mockHCloudClient) DeleteZone(ID string) (hclouddns.HCloudAnswerDeleteZone, error) {
|
||||
return hclouddns.HCloudAnswerDeleteZone{}, nil
|
||||
}
|
||||
func (m *mockHCloudClient) CreateZone(zone hclouddns.HCloudZone) (hclouddns.HCloudAnswerGetZone, error) {
|
||||
return hclouddns.HCloudAnswerGetZone{}, nil
|
||||
}
|
||||
func (m *mockHCloudClient) ImportZoneString(zoneID string, zonePlainText string) (hclouddns.HCloudAnswerGetZone, error) {
|
||||
return hclouddns.HCloudAnswerGetZone{}, nil
|
||||
}
|
||||
func (m *mockHCloudClient) ExportZoneToString(zoneID string) (hclouddns.HCloudAnswerGetZonePlainText, error) {
|
||||
return hclouddns.HCloudAnswerGetZonePlainText{}, nil
|
||||
}
|
||||
func (m *mockHCloudClient) ValidateZoneString(zonePlainText string) (hclouddns.HCloudAnswerZoneValidate, error) {
|
||||
return hclouddns.HCloudAnswerZoneValidate{}, nil
|
||||
}
|
||||
|
||||
// records
|
||||
func (m *mockHCloudClient) GetRecords(params hclouddns.HCloudGetRecordsParams) (hclouddns.HCloudAnswerGetRecords, error) {
|
||||
return hclouddns.HCloudAnswerGetRecords{
|
||||
Records: []hclouddns.HCloudRecord{
|
||||
{
|
||||
RecordType: hclouddns.RecordType("A"),
|
||||
ID: "ATypeRecordID",
|
||||
ZoneID: "HetznerZoneID",
|
||||
Name: "@",
|
||||
Value: "127.0.0.1",
|
||||
TTL: 666,
|
||||
},
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
func (m *mockHCloudClient) UpdateRecord(record hclouddns.HCloudRecord) (hclouddns.HCloudAnswerGetRecord, error) {
|
||||
return hclouddns.HCloudAnswerGetRecord{}, nil
|
||||
}
|
||||
func (m *mockHCloudClient) DeleteRecord(ID string) (hclouddns.HCloudAnswerDeleteRecord, error) {
|
||||
return hclouddns.HCloudAnswerDeleteRecord{}, nil
|
||||
}
|
||||
func (m *mockHCloudClient) CreateRecord(record hclouddns.HCloudRecord) (hclouddns.HCloudAnswerGetRecord, error) {
|
||||
return hclouddns.HCloudAnswerGetRecord{}, nil
|
||||
}
|
||||
func (m *mockHCloudClient) CreateRecordBulk(record []hclouddns.HCloudRecord) (hclouddns.HCloudAnswerCreateRecords, error) {
|
||||
return hclouddns.HCloudAnswerCreateRecords{}, nil
|
||||
}
|
||||
func (m *mockHCloudClient) UpdateRecordBulk(record []hclouddns.HCloudRecord) (hclouddns.HCloudAnswerUpdateRecords, error) {
|
||||
return hclouddns.HCloudAnswerUpdateRecords{}, nil
|
||||
}
|
||||
|
||||
func TestNewHetznerProvider(t *testing.T) {
|
||||
_ = os.Setenv("HETZNER_TOKEN", "myHetznerToken")
|
||||
_, err := NewHetznerProvider(context.Background(), endpoint.NewDomainFilter([]string{"blindage.org"}), true)
|
||||
if err != nil {
|
||||
t.Errorf("failed : %s", err)
|
||||
}
|
||||
|
||||
_ = os.Unsetenv("HETZNER_TOKEN")
|
||||
_, err = NewHetznerProvider(context.Background(), endpoint.NewDomainFilter([]string{"blindage.org"}), true)
|
||||
if err == nil {
|
||||
t.Errorf("expected to fail")
|
||||
}
|
||||
}
|
||||
|
||||
func TestHetznerProvider_TestData(t *testing.T) {
|
||||
|
||||
mockedClient := mockHCloudNew("myHetznerToken")
|
||||
|
||||
// Check test zone data is ok
|
||||
expectedZonesAnswer := hclouddns.HCloudAnswerGetZones{
|
||||
Zones: []hclouddns.HCloudZone{
|
||||
{
|
||||
ID: "HetznerZoneID",
|
||||
Name: "blindage.org",
|
||||
TTL: 666,
|
||||
RecordsCount: 1,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
testingZonesAnswer, err := mockedClient.GetZones(hclouddns.HCloudGetZonesParams{})
|
||||
if err != nil {
|
||||
t.Errorf("should not fail, %s", err)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(expectedZonesAnswer, testingZonesAnswer) {
|
||||
t.Errorf("should be equal, %s", err)
|
||||
}
|
||||
|
||||
// Check test record data is ok
|
||||
expectedRecordsAnswer := hclouddns.HCloudAnswerGetRecords{
|
||||
Records: []hclouddns.HCloudRecord{
|
||||
{
|
||||
RecordType: hclouddns.RecordType("A"),
|
||||
ID: "ATypeRecordID",
|
||||
ZoneID: "HetznerZoneID",
|
||||
Name: "@",
|
||||
Value: "127.0.0.1",
|
||||
TTL: 666,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
testingRecordsAnswer, err := mockedClient.GetRecords(hclouddns.HCloudGetRecordsParams{})
|
||||
if err != nil {
|
||||
t.Errorf("should not fail, %s", err)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(expectedRecordsAnswer, testingRecordsAnswer) {
|
||||
t.Errorf("should be equal, %s", err)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestHetznerProvider_Records(t *testing.T) {
|
||||
|
||||
mockedClient := mockHCloudNew("myHetznerToken")
|
||||
|
||||
mockedProvider := &HetznerProvider{
|
||||
Client: mockedClient,
|
||||
}
|
||||
|
||||
// Now check Records function of provider, if ZoneID equal "blindage.org" must be returned
|
||||
endpoints, err := mockedProvider.Records(context.Background())
|
||||
if err != nil {
|
||||
t.Errorf("should not fail, %s", err)
|
||||
}
|
||||
fmt.Printf("%+v\n", endpoints[0].DNSName)
|
||||
assert.Equal(t, "blindage.org", endpoints[0].DNSName)
|
||||
}
|
||||
|
||||
func TestHetznerProvider_ApplyChanges(t *testing.T) {
|
||||
changes := &plan.Changes{}
|
||||
mockedClient := mockHCloudNew("myHetznerToken")
|
||||
mockedProvider := &HetznerProvider{
|
||||
Client: mockedClient,
|
||||
}
|
||||
|
||||
changes.Create = []*endpoint.Endpoint{
|
||||
{DNSName: "test.org", Targets: endpoint.Targets{"target"}},
|
||||
{DNSName: "test.test.org", Targets: endpoint.Targets{"target"}, RecordTTL: 666},
|
||||
}
|
||||
changes.UpdateNew = []*endpoint.Endpoint{{DNSName: "test.test.org", Targets: endpoint.Targets{"target-new"}, RecordType: "A", RecordTTL: 777}}
|
||||
changes.Delete = []*endpoint.Endpoint{{DNSName: "test.test.org", Targets: endpoint.Targets{"target"}, RecordType: "A"}}
|
||||
|
||||
err := mockedProvider.ApplyChanges(context.Background(), changes)
|
||||
if err != nil {
|
||||
t.Errorf("should not fail, %s", err)
|
||||
}
|
||||
}
|
@ -49,6 +49,7 @@ type InfobloxConfig struct {
|
||||
|
||||
// InfobloxProvider implements the DNS provider for Infoblox.
|
||||
type InfobloxProvider struct {
|
||||
provider.BaseProvider
|
||||
client ibclient.IBConnector
|
||||
domainFilter endpoint.DomainFilter
|
||||
zoneIDFilter provider.ZoneIDFilter
|
||||
|
@ -25,6 +25,7 @@ import (
|
||||
|
||||
"sigs.k8s.io/external-dns/endpoint"
|
||||
"sigs.k8s.io/external-dns/plan"
|
||||
"sigs.k8s.io/external-dns/provider"
|
||||
)
|
||||
|
||||
var (
|
||||
@ -43,6 +44,7 @@ var (
|
||||
// InMemoryProvider - dns provider only used for testing purposes
|
||||
// initialized as dns provider with no records
|
||||
type InMemoryProvider struct {
|
||||
provider.BaseProvider
|
||||
domain endpoint.DomainFilter
|
||||
client *inMemoryClient
|
||||
filter *filter
|
||||
|
@ -35,8 +35,8 @@ import (
|
||||
|
||||
// LinodeDomainClient interface to ease testing
|
||||
type LinodeDomainClient interface {
|
||||
ListDomainRecords(ctx context.Context, domainID int, opts *linodego.ListOptions) ([]*linodego.DomainRecord, error)
|
||||
ListDomains(ctx context.Context, opts *linodego.ListOptions) ([]*linodego.Domain, error)
|
||||
ListDomainRecords(ctx context.Context, domainID int, opts *linodego.ListOptions) ([]linodego.DomainRecord, error)
|
||||
ListDomains(ctx context.Context, opts *linodego.ListOptions) ([]linodego.Domain, error)
|
||||
CreateDomainRecord(ctx context.Context, domainID int, domainrecord linodego.DomainRecordCreateOptions) (*linodego.DomainRecord, error)
|
||||
DeleteDomainRecord(ctx context.Context, domainID int, id int) error
|
||||
UpdateDomainRecord(ctx context.Context, domainID int, id int, domainrecord linodego.DomainRecordUpdateOptions) (*linodego.DomainRecord, error)
|
||||
@ -44,6 +44,7 @@ type LinodeDomainClient interface {
|
||||
|
||||
// LinodeProvider is an implementation of Provider for Digital Ocean's DNS.
|
||||
type LinodeProvider struct {
|
||||
provider.BaseProvider
|
||||
Client LinodeDomainClient
|
||||
domainFilter endpoint.DomainFilter
|
||||
DryRun bool
|
||||
@ -51,28 +52,28 @@ type LinodeProvider struct {
|
||||
|
||||
// LinodeChanges All API calls calculated from the plan
|
||||
type LinodeChanges struct {
|
||||
Creates []*LinodeChangeCreate
|
||||
Deletes []*LinodeChangeDelete
|
||||
Updates []*LinodeChangeUpdate
|
||||
Creates []LinodeChangeCreate
|
||||
Deletes []LinodeChangeDelete
|
||||
Updates []LinodeChangeUpdate
|
||||
}
|
||||
|
||||
// LinodeChangeCreate Linode Domain Record Creates
|
||||
type LinodeChangeCreate struct {
|
||||
Domain *linodego.Domain
|
||||
Domain linodego.Domain
|
||||
Options linodego.DomainRecordCreateOptions
|
||||
}
|
||||
|
||||
// LinodeChangeUpdate Linode Domain Record Updates
|
||||
type LinodeChangeUpdate struct {
|
||||
Domain *linodego.Domain
|
||||
DomainRecord *linodego.DomainRecord
|
||||
Domain linodego.Domain
|
||||
DomainRecord linodego.DomainRecord
|
||||
Options linodego.DomainRecordUpdateOptions
|
||||
}
|
||||
|
||||
// LinodeChangeDelete Linode Domain Record Deletes
|
||||
type LinodeChangeDelete struct {
|
||||
Domain *linodego.Domain
|
||||
DomainRecord *linodego.DomainRecord
|
||||
Domain linodego.Domain
|
||||
DomainRecord linodego.DomainRecord
|
||||
}
|
||||
|
||||
// NewLinodeProvider initializes a new Linode DNS based Provider.
|
||||
@ -102,7 +103,7 @@ func NewLinodeProvider(domainFilter endpoint.DomainFilter, dryRun bool, appVersi
|
||||
}
|
||||
|
||||
// Zones returns the list of hosted zones.
|
||||
func (p *LinodeProvider) Zones(ctx context.Context) ([]*linodego.Domain, error) {
|
||||
func (p *LinodeProvider) Zones(ctx context.Context) ([]linodego.Domain, error) {
|
||||
zones, err := p.fetchZones(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -144,7 +145,7 @@ func (p *LinodeProvider) Records(ctx context.Context) ([]*endpoint.Endpoint, err
|
||||
return endpoints, nil
|
||||
}
|
||||
|
||||
func (p *LinodeProvider) fetchRecords(ctx context.Context, domainID int) ([]*linodego.DomainRecord, error) {
|
||||
func (p *LinodeProvider) fetchRecords(ctx context.Context, domainID int) ([]linodego.DomainRecord, error) {
|
||||
records, err := p.Client.ListDomainRecords(ctx, domainID, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -153,8 +154,8 @@ func (p *LinodeProvider) fetchRecords(ctx context.Context, domainID int) ([]*lin
|
||||
return records, nil
|
||||
}
|
||||
|
||||
func (p *LinodeProvider) fetchZones(ctx context.Context) ([]*linodego.Domain, error) {
|
||||
var zones []*linodego.Domain
|
||||
func (p *LinodeProvider) fetchZones(ctx context.Context) ([]linodego.Domain, error) {
|
||||
var zones []linodego.Domain
|
||||
|
||||
allZones, err := p.Client.ListDomains(ctx, linodego.NewListOptions(0, ""))
|
||||
|
||||
@ -258,7 +259,7 @@ func getPriority() *int {
|
||||
|
||||
// ApplyChanges applies a given set of changes in a given zone.
|
||||
func (p *LinodeProvider) ApplyChanges(ctx context.Context, changes *plan.Changes) error {
|
||||
recordsByZoneID := make(map[string][]*linodego.DomainRecord)
|
||||
recordsByZoneID := make(map[string][]linodego.DomainRecord)
|
||||
|
||||
zones, err := p.fetchZones(ctx)
|
||||
|
||||
@ -266,7 +267,7 @@ func (p *LinodeProvider) ApplyChanges(ctx context.Context, changes *plan.Changes
|
||||
return err
|
||||
}
|
||||
|
||||
zonesByID := make(map[string]*linodego.Domain)
|
||||
zonesByID := make(map[string]linodego.Domain)
|
||||
|
||||
zoneNameIDMapper := provider.ZoneIDName{}
|
||||
|
||||
@ -290,9 +291,9 @@ func (p *LinodeProvider) ApplyChanges(ctx context.Context, changes *plan.Changes
|
||||
updatesByZone := endpointsByZone(zoneNameIDMapper, changes.UpdateNew)
|
||||
deletesByZone := endpointsByZone(zoneNameIDMapper, changes.Delete)
|
||||
|
||||
var linodeCreates []*LinodeChangeCreate
|
||||
var linodeUpdates []*LinodeChangeUpdate
|
||||
var linodeDeletes []*LinodeChangeDelete
|
||||
var linodeCreates []LinodeChangeCreate
|
||||
var linodeUpdates []LinodeChangeUpdate
|
||||
var linodeDeletes []LinodeChangeDelete
|
||||
|
||||
// Generate Creates
|
||||
for zoneID, creates := range createsByZone {
|
||||
@ -327,7 +328,7 @@ func (p *LinodeProvider) ApplyChanges(ctx context.Context, changes *plan.Changes
|
||||
}
|
||||
|
||||
for _, target := range ep.Targets {
|
||||
linodeCreates = append(linodeCreates, &LinodeChangeCreate{
|
||||
linodeCreates = append(linodeCreates, LinodeChangeCreate{
|
||||
Domain: zone,
|
||||
Options: linodego.DomainRecordCreateOptions{
|
||||
Target: target,
|
||||
@ -375,7 +376,7 @@ func (p *LinodeProvider) ApplyChanges(ctx context.Context, changes *plan.Changes
|
||||
return err
|
||||
}
|
||||
|
||||
matchedRecordsByTarget := make(map[string]*linodego.DomainRecord)
|
||||
matchedRecordsByTarget := make(map[string]linodego.DomainRecord)
|
||||
|
||||
for _, record := range matchedRecords {
|
||||
matchedRecordsByTarget[record.Target] = record
|
||||
@ -391,7 +392,7 @@ func (p *LinodeProvider) ApplyChanges(ctx context.Context, changes *plan.Changes
|
||||
"target": target,
|
||||
}).Warn("Updating Existing Target")
|
||||
|
||||
linodeUpdates = append(linodeUpdates, &LinodeChangeUpdate{
|
||||
linodeUpdates = append(linodeUpdates, LinodeChangeUpdate{
|
||||
Domain: zone,
|
||||
DomainRecord: record,
|
||||
Options: linodego.DomainRecordUpdateOptions{
|
||||
@ -416,7 +417,7 @@ func (p *LinodeProvider) ApplyChanges(ctx context.Context, changes *plan.Changes
|
||||
"target": target,
|
||||
}).Warn("Creating New Target")
|
||||
|
||||
linodeCreates = append(linodeCreates, &LinodeChangeCreate{
|
||||
linodeCreates = append(linodeCreates, LinodeChangeCreate{
|
||||
Domain: zone,
|
||||
Options: linodego.DomainRecordCreateOptions{
|
||||
Target: target,
|
||||
@ -441,12 +442,11 @@ func (p *LinodeProvider) ApplyChanges(ctx context.Context, changes *plan.Changes
|
||||
"target": record.Target,
|
||||
}).Warn("Deleting Target")
|
||||
|
||||
linodeDeletes = append(linodeDeletes, &LinodeChangeDelete{
|
||||
linodeDeletes = append(linodeDeletes, LinodeChangeDelete{
|
||||
Domain: zone,
|
||||
DomainRecord: record,
|
||||
})
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
@ -477,7 +477,7 @@ func (p *LinodeProvider) ApplyChanges(ctx context.Context, changes *plan.Changes
|
||||
}
|
||||
|
||||
for _, record := range matchedRecords {
|
||||
linodeDeletes = append(linodeDeletes, &LinodeChangeDelete{
|
||||
linodeDeletes = append(linodeDeletes, LinodeChangeDelete{
|
||||
Domain: zone,
|
||||
DomainRecord: record,
|
||||
})
|
||||
@ -492,8 +492,8 @@ func (p *LinodeProvider) ApplyChanges(ctx context.Context, changes *plan.Changes
|
||||
})
|
||||
}
|
||||
|
||||
func endpointsByZone(zoneNameIDMapper provider.ZoneIDName, endpoints []*endpoint.Endpoint) map[string][]*endpoint.Endpoint {
|
||||
endpointsByZone := make(map[string][]*endpoint.Endpoint)
|
||||
func endpointsByZone(zoneNameIDMapper provider.ZoneIDName, endpoints []*endpoint.Endpoint) map[string][]endpoint.Endpoint {
|
||||
endpointsByZone := make(map[string][]endpoint.Endpoint)
|
||||
|
||||
for _, ep := range endpoints {
|
||||
zoneID, _ := zoneNameIDMapper.FindZone(ep.DNSName)
|
||||
@ -501,7 +501,7 @@ func endpointsByZone(zoneNameIDMapper provider.ZoneIDName, endpoints []*endpoint
|
||||
log.Debugf("Skipping record %s because no hosted zone matching record DNS Name was detected", ep.DNSName)
|
||||
continue
|
||||
}
|
||||
endpointsByZone[zoneID] = append(endpointsByZone[zoneID], ep)
|
||||
endpointsByZone[zoneID] = append(endpointsByZone[zoneID], *ep)
|
||||
}
|
||||
|
||||
return endpointsByZone
|
||||
@ -524,7 +524,7 @@ func convertRecordType(recordType string) (linodego.DomainRecordType, error) {
|
||||
}
|
||||
}
|
||||
|
||||
func getStrippedRecordName(zone *linodego.Domain, ep *endpoint.Endpoint) string {
|
||||
func getStrippedRecordName(zone linodego.Domain, ep endpoint.Endpoint) string {
|
||||
// Handle root
|
||||
if ep.DNSName == zone.Domain {
|
||||
return ""
|
||||
@ -533,8 +533,8 @@ func getStrippedRecordName(zone *linodego.Domain, ep *endpoint.Endpoint) string
|
||||
return strings.TrimSuffix(ep.DNSName, "."+zone.Domain)
|
||||
}
|
||||
|
||||
func getRecordID(records []*linodego.DomainRecord, zone *linodego.Domain, ep *endpoint.Endpoint) []*linodego.DomainRecord {
|
||||
var matchedRecords []*linodego.DomainRecord
|
||||
func getRecordID(records []linodego.DomainRecord, zone linodego.Domain, ep endpoint.Endpoint) []linodego.DomainRecord {
|
||||
var matchedRecords []linodego.DomainRecord
|
||||
|
||||
for _, record := range records {
|
||||
if record.Name == getStrippedRecordName(zone, ep) && string(record.Type) == ep.RecordType {
|
||||
|
@ -34,14 +34,14 @@ type MockDomainClient struct {
|
||||
mock.Mock
|
||||
}
|
||||
|
||||
func (m *MockDomainClient) ListDomainRecords(ctx context.Context, domainID int, opts *linodego.ListOptions) ([]*linodego.DomainRecord, error) {
|
||||
func (m *MockDomainClient) ListDomainRecords(ctx context.Context, domainID int, opts *linodego.ListOptions) ([]linodego.DomainRecord, error) {
|
||||
args := m.Called(ctx, domainID, opts)
|
||||
return args.Get(0).([]*linodego.DomainRecord), args.Error(1)
|
||||
return args.Get(0).([]linodego.DomainRecord), args.Error(1)
|
||||
}
|
||||
|
||||
func (m *MockDomainClient) ListDomains(ctx context.Context, opts *linodego.ListOptions) ([]*linodego.Domain, error) {
|
||||
func (m *MockDomainClient) ListDomains(ctx context.Context, opts *linodego.ListOptions) ([]linodego.Domain, error) {
|
||||
args := m.Called(ctx, opts)
|
||||
return args.Get(0).([]*linodego.Domain), args.Error(1)
|
||||
return args.Get(0).([]linodego.Domain), args.Error(1)
|
||||
}
|
||||
func (m *MockDomainClient) CreateDomainRecord(ctx context.Context, domainID int, opts linodego.DomainRecordCreateOptions) (*linodego.DomainRecord, error) {
|
||||
args := m.Called(ctx, domainID, opts)
|
||||
@ -56,16 +56,16 @@ func (m *MockDomainClient) UpdateDomainRecord(ctx context.Context, domainID int,
|
||||
return args.Get(0).(*linodego.DomainRecord), args.Error(1)
|
||||
}
|
||||
|
||||
func createZones() []*linodego.Domain {
|
||||
return []*linodego.Domain{
|
||||
func createZones() []linodego.Domain {
|
||||
return []linodego.Domain{
|
||||
{ID: 1, Domain: "foo.com"},
|
||||
{ID: 2, Domain: "bar.io"},
|
||||
{ID: 3, Domain: "baz.com"},
|
||||
}
|
||||
}
|
||||
|
||||
func createFooRecords() []*linodego.DomainRecord {
|
||||
return []*linodego.DomainRecord{{
|
||||
func createFooRecords() []linodego.DomainRecord {
|
||||
return []linodego.DomainRecord{{
|
||||
ID: 11,
|
||||
Type: linodego.RecordTypeA,
|
||||
Name: "",
|
||||
@ -83,12 +83,12 @@ func createFooRecords() []*linodego.DomainRecord {
|
||||
}}
|
||||
}
|
||||
|
||||
func createBarRecords() []*linodego.DomainRecord {
|
||||
return []*linodego.DomainRecord{}
|
||||
func createBarRecords() []linodego.DomainRecord {
|
||||
return []linodego.DomainRecord{}
|
||||
}
|
||||
|
||||
func createBazRecords() []*linodego.DomainRecord {
|
||||
return []*linodego.DomainRecord{{
|
||||
func createBazRecords() []linodego.DomainRecord {
|
||||
return []linodego.DomainRecord{{
|
||||
ID: 31,
|
||||
Type: linodego.RecordTypeA,
|
||||
Name: "",
|
||||
@ -147,15 +147,15 @@ func TestNewLinodeProvider(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestLinodeStripRecordName(t *testing.T) {
|
||||
assert.Equal(t, "api", getStrippedRecordName(&linodego.Domain{
|
||||
assert.Equal(t, "api", getStrippedRecordName(linodego.Domain{
|
||||
Domain: "example.com",
|
||||
}, &endpoint.Endpoint{
|
||||
}, endpoint.Endpoint{
|
||||
DNSName: "api.example.com",
|
||||
}))
|
||||
|
||||
assert.Equal(t, "", getStrippedRecordName(&linodego.Domain{
|
||||
assert.Equal(t, "", getStrippedRecordName(linodego.Domain{
|
||||
Domain: "example.com",
|
||||
}, &endpoint.Endpoint{
|
||||
}, endpoint.Endpoint{
|
||||
DNSName: "example.com",
|
||||
}))
|
||||
}
|
||||
@ -198,7 +198,7 @@ func TestLinodeFetchZonesWithFilter(t *testing.T) {
|
||||
mock.Anything,
|
||||
).Return(createZones(), nil).Once()
|
||||
|
||||
expected := []*linodego.Domain{
|
||||
expected := []linodego.Domain{
|
||||
{ID: 1, Domain: "foo.com"},
|
||||
{ID: 3, Domain: "baz.com"},
|
||||
}
|
||||
@ -210,15 +210,15 @@ func TestLinodeFetchZonesWithFilter(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestLinodeGetStrippedRecordName(t *testing.T) {
|
||||
assert.Equal(t, "", getStrippedRecordName(&linodego.Domain{
|
||||
assert.Equal(t, "", getStrippedRecordName(linodego.Domain{
|
||||
Domain: "foo.com",
|
||||
}, &endpoint.Endpoint{
|
||||
}, endpoint.Endpoint{
|
||||
DNSName: "foo.com",
|
||||
}))
|
||||
|
||||
assert.Equal(t, "api", getStrippedRecordName(&linodego.Domain{
|
||||
assert.Equal(t, "api", getStrippedRecordName(linodego.Domain{
|
||||
Domain: "foo.com",
|
||||
}, &endpoint.Endpoint{
|
||||
}, endpoint.Endpoint{
|
||||
DNSName: "api.foo.com",
|
||||
}))
|
||||
}
|
||||
@ -398,14 +398,14 @@ func TestLinodeApplyChangesTargetAdded(t *testing.T) {
|
||||
"ListDomains",
|
||||
mock.Anything,
|
||||
mock.Anything,
|
||||
).Return([]*linodego.Domain{{Domain: "example.com", ID: 1}}, nil).Once()
|
||||
).Return([]linodego.Domain{{Domain: "example.com", ID: 1}}, nil).Once()
|
||||
|
||||
mockDomainClient.On(
|
||||
"ListDomainRecords",
|
||||
mock.Anything,
|
||||
1,
|
||||
mock.Anything,
|
||||
).Return([]*linodego.DomainRecord{{ID: 11, Name: "", Type: "A", Target: "targetA"}}, nil).Once()
|
||||
).Return([]linodego.DomainRecord{{ID: 11, Name: "", Type: "A", Target: "targetA"}}, nil).Once()
|
||||
|
||||
// Apply Actions
|
||||
mockDomainClient.On(
|
||||
@ -457,14 +457,14 @@ func TestLinodeApplyChangesTargetRemoved(t *testing.T) {
|
||||
"ListDomains",
|
||||
mock.Anything,
|
||||
mock.Anything,
|
||||
).Return([]*linodego.Domain{{Domain: "example.com", ID: 1}}, nil).Once()
|
||||
).Return([]linodego.Domain{{Domain: "example.com", ID: 1}}, nil).Once()
|
||||
|
||||
mockDomainClient.On(
|
||||
"ListDomainRecords",
|
||||
mock.Anything,
|
||||
1,
|
||||
mock.Anything,
|
||||
).Return([]*linodego.DomainRecord{{ID: 11, Name: "", Type: "A", Target: "targetA"}, {ID: 12, Type: "A", Name: "", Target: "targetB"}}, nil).Once()
|
||||
).Return([]linodego.DomainRecord{{ID: 11, Name: "", Type: "A", Target: "targetA"}, {ID: 12, Type: "A", Name: "", Target: "targetB"}}, nil).Once()
|
||||
|
||||
// Apply Actions
|
||||
mockDomainClient.On(
|
||||
@ -513,14 +513,14 @@ func TestLinodeApplyChangesNoChanges(t *testing.T) {
|
||||
"ListDomains",
|
||||
mock.Anything,
|
||||
mock.Anything,
|
||||
).Return([]*linodego.Domain{{Domain: "example.com", ID: 1}}, nil).Once()
|
||||
).Return([]linodego.Domain{{Domain: "example.com", ID: 1}}, nil).Once()
|
||||
|
||||
mockDomainClient.On(
|
||||
"ListDomainRecords",
|
||||
mock.Anything,
|
||||
1,
|
||||
mock.Anything,
|
||||
).Return([]*linodego.DomainRecord{{ID: 11, Name: "", Type: "A", Target: "targetA"}}, nil).Once()
|
||||
).Return([]linodego.DomainRecord{{ID: 11, Name: "", Type: "A", Target: "targetA"}}, nil).Once()
|
||||
|
||||
err := provider.ApplyChanges(context.Background(), &plan.Changes{})
|
||||
require.NoError(t, err)
|
||||
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user