From 9e584fb15d19ffb3a5bc2d9d07853724b93f1fa3 Mon Sep 17 00:00:00 2001 From: Matt Dennison Date: Wed, 13 May 2020 15:58:23 -0700 Subject: [PATCH 01/46] chore: add minTTLSeconds --- provider/ns1/ns1.go | 27 +++++++++++++++------------ 1 file changed, 15 insertions(+), 12 deletions(-) diff --git a/provider/ns1/ns1.go b/provider/ns1/ns1.go index a30254cd6..9f2600698 100644 --- a/provider/ns1/ns1.go +++ b/provider/ns1/ns1.go @@ -85,19 +85,21 @@ func (n NS1DomainService) ListZones() ([]*dns.Zone, *http.Response, error) { // NS1Config passes cli args to the NS1Provider type NS1Config struct { - DomainFilter endpoint.DomainFilter - ZoneIDFilter provider.ZoneIDFilter - NS1Endpoint string - NS1IgnoreSSL bool - DryRun bool + DomainFilter endpoint.DomainFilter + ZoneIDFilter provider.ZoneIDFilter + NS1Endpoint string + NS1IgnoreSSL bool + DryRun bool + MinTTLSeconds int } // NS1Provider is the NS1 provider type NS1Provider struct { - client NS1DomainClient - domainFilter endpoint.DomainFilter - zoneIDFilter provider.ZoneIDFilter - dryRun bool + client NS1DomainClient + domainFilter endpoint.DomainFilter + zoneIDFilter provider.ZoneIDFilter + dryRun bool + minTTLSeconds int } // NewNS1Provider creates a new NS1 Provider @@ -134,9 +136,10 @@ func newNS1ProviderWithHTTPClient(config NS1Config, client *http.Client) (*NS1Pr apiClient := api.NewClient(client, clientArgs...) provider := &NS1Provider{ - client: NS1DomainService{apiClient}, - domainFilter: config.DomainFilter, - zoneIDFilter: config.ZoneIDFilter, + client: NS1DomainService{apiClient}, + domainFilter: config.DomainFilter, + zoneIDFilter: config.ZoneIDFilter, + minTTLSeconds: config.MinTTLSeconds, } return provider, nil } From 6176d8151d2c10f38098876483de9c5cf86b8337 Mon Sep 17 00:00:00 2001 From: Matt Dennison Date: Thu, 14 May 2020 08:58:54 -0700 Subject: [PATCH 02/46] feat: ns1 min-ttl-seconds --- main.go | 11 ++++++----- pkg/apis/externaldns/types.go | 2 ++ provider/ns1/ns1.go | 9 ++++++--- 3 files changed, 14 insertions(+), 8 deletions(-) diff --git a/main.go b/main.go index 871ec1c97..64e561856 100644 --- a/main.go +++ b/main.go @@ -278,11 +278,12 @@ func main() { case "ns1": p, err = ns1.NewNS1Provider( ns1.NS1Config{ - DomainFilter: domainFilter, - ZoneIDFilter: zoneIDFilter, - NS1Endpoint: cfg.NS1Endpoint, - NS1IgnoreSSL: cfg.NS1IgnoreSSL, - DryRun: cfg.DryRun, + DomainFilter: domainFilter, + ZoneIDFilter: zoneIDFilter, + NS1Endpoint: cfg.NS1Endpoint, + NS1IgnoreSSL: cfg.NS1IgnoreSSL, + DryRun: cfg.DryRun, + MinTTLSeconds: cfg.NS1MinTTLSeconds, }, ) case "transip": diff --git a/pkg/apis/externaldns/types.go b/pkg/apis/externaldns/types.go index 385427d9d..7b5b75188 100644 --- a/pkg/apis/externaldns/types.go +++ b/pkg/apis/externaldns/types.go @@ -137,6 +137,7 @@ type Config struct { RFC2136MinTTL time.Duration NS1Endpoint string NS1IgnoreSSL bool + NS1MinTTLSeconds int TransIPAccountName string TransIPPrivateKeyFile string } @@ -361,6 +362,7 @@ func (cfg *Config) ParseFlags(args []string) error { app.Flag("pdns-tls-enabled", "When using the PowerDNS/PDNS provider, specify whether to use TLS (default: false, requires --tls-ca, optionally specify --tls-client-cert and --tls-client-cert-key)").Default(strconv.FormatBool(defaultConfig.PDNSTLSEnabled)).BoolVar(&cfg.PDNSTLSEnabled) app.Flag("ns1-endpoint", "When using the NS1 provider, specify the URL of the API endpoint to target (default: https://api.nsone.net/v1/)").Default(defaultConfig.NS1Endpoint).StringVar(&cfg.NS1Endpoint) app.Flag("ns1-ignoressl", "When using the NS1 provider, 
specify whether to verify the SSL certificate (default: false)").Default(strconv.FormatBool(defaultConfig.NS1IgnoreSSL)).BoolVar(&cfg.NS1IgnoreSSL) + app.Flag("ns1-min-ttl", "Minimal TTL (in seconds) for records. This value will be used if the provided TTL for a service/ingress is lower than this.").IntVar(&cfg.NS1MinTTLSeconds) // Flags related to TLS communication app.Flag("tls-ca", "When using TLS communication, the path to the certificate authority to verify server communications (optionally specify --tls-client-cert for two-way TLS)").Default(defaultConfig.TLSCA).StringVar(&cfg.TLSCA) diff --git a/provider/ns1/ns1.go b/provider/ns1/ns1.go index 9f2600698..82ebafc14 100644 --- a/provider/ns1/ns1.go +++ b/provider/ns1/ns1.go @@ -178,13 +178,16 @@ func (p *NS1Provider) Records(ctx context.Context) ([]*endpoint.Endpoint, error) } // ns1BuildRecord returns a dns.Record for a change set -func ns1BuildRecord(zoneName string, change *ns1Change) *dns.Record { +func (p *NS1Provider) ns1BuildRecord(zoneName string, change *ns1Change) *dns.Record { record := dns.NewRecord(zoneName, change.Endpoint.DNSName, change.Endpoint.RecordType) for _, v := range change.Endpoint.Targets { record.AddAnswer(dns.NewAnswer(strings.Split(v, " "))) } - // set detault ttl + // set detault ttl, but respect minTTLSeconds var ttl = ns1DefaultTTL + if p.minTTLSeconds > ttl { + ttl = p.minTTLSeconds + } if change.Endpoint.RecordTTL.IsConfigured() { ttl = int(change.Endpoint.RecordTTL) } @@ -209,7 +212,7 @@ func (p *NS1Provider) ns1SubmitChanges(changes []*ns1Change) error { changesByZone := ns1ChangesByZone(zones, changes) for zoneName, changes := range changesByZone { for _, change := range changes { - record := ns1BuildRecord(zoneName, change) + record := p.ns1BuildRecord(zoneName, change) logFields := log.Fields{ "record": record.Domain, "type": record.Type, From c74f22e12326be4aa8253ccf7cf1a3cd9266a7bb Mon Sep 17 00:00:00 2001 From: Matt Dennison Date: Thu, 14 May 2020 09:34:49 -0700 Subject: [PATCH 03/46] test(ns1): minTTLSeconds changes --- provider/ns1/ns1.go | 2 +- provider/ns1/ns1_test.go | 25 +++++++++++++++++-------- 2 files changed, 18 insertions(+), 9 deletions(-) diff --git a/provider/ns1/ns1.go b/provider/ns1/ns1.go index 82ebafc14..e859fa4f2 100644 --- a/provider/ns1/ns1.go +++ b/provider/ns1/ns1.go @@ -183,7 +183,7 @@ func (p *NS1Provider) ns1BuildRecord(zoneName string, change *ns1Change) *dns.Re for _, v := range change.Endpoint.Targets { record.AddAnswer(dns.NewAnswer(strings.Split(v, " "))) } - // set detault ttl, but respect minTTLSeconds + // set default ttl, but respect minTTLSeconds var ttl = ns1DefaultTTL if p.minTTLSeconds > ttl { ttl = p.minTTLSeconds diff --git a/provider/ns1/ns1_test.go b/provider/ns1/ns1_test.go index f4309ec50..6d529879b 100644 --- a/provider/ns1/ns1_test.go +++ b/provider/ns1/ns1_test.go @@ -129,9 +129,10 @@ func (m *MockNS1ListZonesFail) ListZones() ([]*dns.Zone, *http.Response, error) func TestNS1Records(t *testing.T) { provider := &NS1Provider{ - client: &MockNS1DomainClient{}, - domainFilter: endpoint.NewDomainFilter([]string{"foo.com."}), - zoneIDFilter: provider.NewZoneIDFilter([]string{""}), + client: &MockNS1DomainClient{}, + domainFilter: endpoint.NewDomainFilter([]string{"foo.com."}), + zoneIDFilter: provider.NewZoneIDFilter([]string{""}), + minTTLSeconds: 3600, } ctx := context.Background() @@ -195,10 +196,18 @@ func TestNS1BuildRecord(t *testing.T) { RecordType: "A", }, } - record := ns1BuildRecord("foo.com", change) + + provider := &NS1Provider{ + client: 
&MockNS1DomainClient{}, + domainFilter: endpoint.NewDomainFilter([]string{"foo.com."}), + zoneIDFilter: provider.NewZoneIDFilter([]string{""}), + minTTLSeconds: 300, + } + + record := provider.ns1BuildRecord("foo.com", change) assert.Equal(t, "foo.com", record.Zone) assert.Equal(t, "new.foo.com", record.Domain) - assert.Equal(t, ns1DefaultTTL, record.TTL) + assert.Equal(t, 300, record.TTL) changeWithTTL := &ns1Change{ Action: ns1Create, @@ -206,13 +215,13 @@ func TestNS1BuildRecord(t *testing.T) { DNSName: "new-b", Targets: endpoint.Targets{"target"}, RecordType: "A", - RecordTTL: 100, + RecordTTL: 3600, }, } - record = ns1BuildRecord("foo.com", changeWithTTL) + record = provider.ns1BuildRecord("foo.com", changeWithTTL) assert.Equal(t, "foo.com", record.Zone) assert.Equal(t, "new-b.foo.com", record.Domain) - assert.Equal(t, 100, record.TTL) + assert.Equal(t, 3600, record.TTL) } func TestNS1ApplyChanges(t *testing.T) { From 5beb528c327b84ada2c3384a85d9674a9437cbc3 Mon Sep 17 00:00:00 2001 From: Joseph Glanville Date: Fri, 12 Jun 2020 16:16:56 +0700 Subject: [PATCH 04/46] Contour HTTPProxy support --- pkg/apis/externaldns/types.go | 2 +- source/httpproxy.go | 340 ++++++++++ source/httpproxy_test.go | 1081 ++++++++++++++++++++++++++++++ source/ingressroute.go | 45 +- source/ingressroute_test.go | 9 +- source/store.go | 6 + source/store_test.go | 6 +- source/unstructured_converter.go | 32 + 8 files changed, 1479 insertions(+), 42 deletions(-) create mode 100644 source/httpproxy.go create mode 100644 source/httpproxy_test.go create mode 100644 source/unstructured_converter.go diff --git a/pkg/apis/externaldns/types.go b/pkg/apis/externaldns/types.go index be557ef1e..7d52f5032 100644 --- a/pkg/apis/externaldns/types.go +++ b/pkg/apis/externaldns/types.go @@ -300,7 +300,7 @@ func (cfg *Config) ParseFlags(args []string) error { app.Flag("skipper-routegroup-groupversion", "The resource version for skipper routegroup").Default(source.DefaultRoutegroupVersion).StringVar(&cfg.SkipperRouteGroupVersion) // Flags related to processing sources - app.Flag("source", "The resource types that are queried for endpoints; specify multiple times for multiple sources (required, options: service, ingress, node, fake, connector, istio-gateway, istio-virtualservice, cloudfoundry, contour-ingressroute, crd, empty, skipper-routegroup,openshift-route)").Required().PlaceHolder("source").EnumsVar(&cfg.Sources, "service", "ingress", "node", "istio-gateway", "istio-virtualservice", "cloudfoundry", "contour-ingressroute", "fake", "connector", "crd", "empty", "skipper-routegroup", "openshift-route") + app.Flag("source", "The resource types that are queried for endpoints; specify multiple times for multiple sources (required, options: service, ingress, node, fake, connector, istio-gateway, istio-virtualservice, cloudfoundry, contour-ingressroute, contour-httpproxy, crd, empty, skipper-routegroup,openshift-route)").Required().PlaceHolder("source").EnumsVar(&cfg.Sources, "service", "ingress", "node", "istio-gateway", "istio-virtualservice", "cloudfoundry", "contour-ingressroute", "contour-httpproxy", "fake", "connector", "crd", "empty", "skipper-routegroup", "openshift-route") app.Flag("namespace", "Limit sources of endpoints to a specific namespace (default: all namespaces)").Default(defaultConfig.Namespace).StringVar(&cfg.Namespace) app.Flag("annotation-filter", "Filter sources managed by external-dns via annotation using label selector semantics (default: all 
sources)").Default(defaultConfig.AnnotationFilter).StringVar(&cfg.AnnotationFilter) diff --git a/source/httpproxy.go b/source/httpproxy.go new file mode 100644 index 000000000..c4af473b6 --- /dev/null +++ b/source/httpproxy.go @@ -0,0 +1,340 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package source + +import ( + "bytes" + "context" + "fmt" + "sort" + "strings" + "text/template" + "time" + + "github.com/pkg/errors" + projectcontour "github.com/projectcontour/contour/apis/projectcontour/v1" + log "github.com/sirupsen/logrus" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/dynamic/dynamicinformer" + "k8s.io/client-go/informers" + "k8s.io/client-go/tools/cache" + + "sigs.k8s.io/external-dns/endpoint" +) + +// HTTPProxySource is an implementation of Source for ProjectContour HTTPProxy objects. +// The HTTPProxy implementation uses the spec.virtualHost.fqdn value for the hostname. +// Use targetAnnotationKey to explicitly set Endpoint. +type httpProxySource struct { + dynamicKubeClient dynamic.Interface + namespace string + annotationFilter string + fqdnTemplate *template.Template + combineFQDNAnnotation bool + ignoreHostnameAnnotation bool + httpProxyInformer informers.GenericInformer + unstructuredConverter *UnstructuredConverter +} + +// NewContourHTTPProxySource creates a new contourHTTPProxySource with the given config. +func NewContourHTTPProxySource( + dynamicKubeClient dynamic.Interface, + namespace string, + annotationFilter string, + fqdnTemplate string, + combineFqdnAnnotation bool, + ignoreHostnameAnnotation bool, +) (Source, error) { + var ( + tmpl *template.Template + err error + ) + if fqdnTemplate != "" { + tmpl, err = template.New("endpoint").Funcs(template.FuncMap{ + "trimPrefix": strings.TrimPrefix, + }).Parse(fqdnTemplate) + if err != nil { + return nil, err + } + } + + // Use shared informer to listen for add/update/delete of HTTPProxys in the specified namespace. + // Set resync period to 0, to prevent processing when nothing has changed. + informerFactory := dynamicinformer.NewFilteredDynamicSharedInformerFactory(dynamicKubeClient, 0, namespace, nil) + httpProxyInformer := informerFactory.ForResource(projectcontour.HTTPProxyGVR) + + // Add default resource event handlers to properly initialize informer. + httpProxyInformer.Informer().AddEventHandler( + cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + }, + }, + ) + + // TODO informer is not explicitly stopped since controller is not passing in its channel. + informerFactory.Start(wait.NeverStop) + + // wait for the local cache to be populated. 
+ err = poll(time.Second, 60*time.Second, func() (bool, error) { + return httpProxyInformer.Informer().HasSynced(), nil + }) + if err != nil { + return nil, fmt.Errorf("failed to sync cache: %v", err) + } + + uc, err := NewUnstructuredConverter() + if err != nil { + return nil, fmt.Errorf("failed to setup Unstructured Converter: %v", err) + } + + return &httpProxySource{ + dynamicKubeClient: dynamicKubeClient, + namespace: namespace, + annotationFilter: annotationFilter, + fqdnTemplate: tmpl, + combineFQDNAnnotation: combineFqdnAnnotation, + ignoreHostnameAnnotation: ignoreHostnameAnnotation, + httpProxyInformer: httpProxyInformer, + unstructuredConverter: uc, + }, nil +} + +// Endpoints returns endpoint objects for each host-target combination that should be processed. +// Retrieves all HTTPProxy resources in the source's namespace(s). +func (sc *httpProxySource) Endpoints(ctx context.Context) ([]*endpoint.Endpoint, error) { + hps, err := sc.httpProxyInformer.Lister().ByNamespace(sc.namespace).List(labels.Everything()) + if err != nil { + return nil, err + } + + // Convert to []*projectcontour.HTTPProxy + var httpProxies []*projectcontour.HTTPProxy + for _, hp := range hps { + unstrucuredHP, ok := hp.(*unstructured.Unstructured) + if !ok { + return nil, errors.New("could not convert") + } + + hpConverted := &projectcontour.HTTPProxy{} + err := sc.unstructuredConverter.scheme.Convert(unstrucuredHP, hpConverted, nil) + if err != nil { + return nil, err + } + httpProxies = append(httpProxies, hpConverted) + } + + httpProxies, err = sc.filterByAnnotations(httpProxies) + if err != nil { + return nil, err + } + + endpoints := []*endpoint.Endpoint{} + + for _, hp := range httpProxies { + // Check controller annotation to see if we are responsible. + controller, ok := hp.Annotations[controllerAnnotationKey] + if ok && controller != controllerAnnotationValue { + log.Debugf("Skipping HTTPProxy %s/%s because controller value does not match, found: %s, required: %s", + hp.Namespace, hp.Name, controller, controllerAnnotationValue) + continue + } else if hp.Status.CurrentStatus != "valid" { + log.Debugf("Skipping HTTPProxy %s/%s because it is not valid", hp.Namespace, hp.Name) + continue + } + + hpEndpoints, err := sc.endpointsFromHTTPProxy(hp) + if err != nil { + return nil, err + } + + // apply template if fqdn is missing on HTTPProxy + if (sc.combineFQDNAnnotation || len(hpEndpoints) == 0) && sc.fqdnTemplate != nil { + tmplEndpoints, err := sc.endpointsFromTemplate(hp) + if err != nil { + return nil, err + } + + if sc.combineFQDNAnnotation { + hpEndpoints = append(hpEndpoints, tmplEndpoints...) + } else { + hpEndpoints = tmplEndpoints + } + } + + if len(hpEndpoints) == 0 { + log.Debugf("No endpoints could be generated from HTTPProxy %s/%s", hp.Namespace, hp.Name) + continue + } + + log.Debugf("Endpoints generated from HTTPProxy: %s/%s: %v", hp.Namespace, hp.Name, hpEndpoints) + sc.setResourceLabel(hp, hpEndpoints) + endpoints = append(endpoints, hpEndpoints...) 
+ } + + for _, ep := range endpoints { + sort.Sort(ep.Targets) + } + + return endpoints, nil +} + +func (sc *httpProxySource) endpointsFromTemplate(httpProxy *projectcontour.HTTPProxy) ([]*endpoint.Endpoint, error) { + // Process the whole template string + var buf bytes.Buffer + err := sc.fqdnTemplate.Execute(&buf, httpProxy) + if err != nil { + return nil, fmt.Errorf("failed to apply template on HTTPProxy %s/%s: %v", httpProxy.Namespace, httpProxy.Name, err) + } + + hostnames := buf.String() + + ttl, err := getTTLFromAnnotations(httpProxy.Annotations) + if err != nil { + log.Warn(err) + } + + targets := getTargetsFromTargetAnnotation(httpProxy.Annotations) + + if len(targets) == 0 { + for _, lb := range httpProxy.Status.LoadBalancer.Ingress { + if lb.IP != "" { + targets = append(targets, lb.IP) + } + if lb.Hostname != "" { + targets = append(targets, lb.Hostname) + } + } + } + + providerSpecific, setIdentifier := getProviderSpecificAnnotations(httpProxy.Annotations) + + var endpoints []*endpoint.Endpoint + // splits the FQDN template and removes the trailing periods + hostnameList := strings.Split(strings.Replace(hostnames, " ", "", -1), ",") + for _, hostname := range hostnameList { + hostname = strings.TrimSuffix(hostname, ".") + endpoints = append(endpoints, endpointsForHostname(hostname, targets, ttl, providerSpecific, setIdentifier)...) + } + return endpoints, nil +} + +// filterByAnnotations filters a list of configs by a given annotation selector. +func (sc *httpProxySource) filterByAnnotations(httpProxies []*projectcontour.HTTPProxy) ([]*projectcontour.HTTPProxy, error) { + labelSelector, err := metav1.ParseToLabelSelector(sc.annotationFilter) + if err != nil { + return nil, err + } + selector, err := metav1.LabelSelectorAsSelector(labelSelector) + if err != nil { + return nil, err + } + + // empty filter returns original list + if selector.Empty() { + return httpProxies, nil + } + + filteredList := []*projectcontour.HTTPProxy{} + + for _, httpProxy := range httpProxies { + // convert the HTTPProxy's annotations to an equivalent label selector + annotations := labels.Set(httpProxy.Annotations) + + // include HTTPProxy if its annotations match the selector + if selector.Matches(annotations) { + filteredList = append(filteredList, httpProxy) + } + } + + return filteredList, nil +} + +func (sc *httpProxySource) setResourceLabel(httpProxy *projectcontour.HTTPProxy, endpoints []*endpoint.Endpoint) { + for _, ep := range endpoints { + ep.Labels[endpoint.ResourceLabelKey] = fmt.Sprintf("HTTPProxy/%s/%s", httpProxy.Namespace, httpProxy.Name) + } +} + +// endpointsFromHTTPProxyConfig extracts the endpoints from a Contour HTTPProxy object +func (sc *httpProxySource) endpointsFromHTTPProxy(httpProxy *projectcontour.HTTPProxy) ([]*endpoint.Endpoint, error) { + if httpProxy.Status.CurrentStatus != "valid" { + log.Warn(errors.Errorf("cannot generate endpoints for HTTPProxy with status %s", httpProxy.Status.CurrentStatus)) + return nil, nil + } + + var endpoints []*endpoint.Endpoint + + ttl, err := getTTLFromAnnotations(httpProxy.Annotations) + if err != nil { + log.Warn(err) + } + + targets := getTargetsFromTargetAnnotation(httpProxy.Annotations) + + if len(targets) == 0 { + for _, lb := range httpProxy.Status.LoadBalancer.Ingress { + if lb.IP != "" { + targets = append(targets, lb.IP) + } + if lb.Hostname != "" { + targets = append(targets, lb.Hostname) + } + } + } + + providerSpecific, setIdentifier := getProviderSpecificAnnotations(httpProxy.Annotations) + + if virtualHost := 
httpProxy.Spec.VirtualHost; virtualHost != nil { + if fqdn := virtualHost.Fqdn; fqdn != "" { + endpoints = append(endpoints, endpointsForHostname(fqdn, targets, ttl, providerSpecific, setIdentifier)...) + } + } + + // Skip endpoints if we do not want entries from annotations + if !sc.ignoreHostnameAnnotation { + hostnameList := getHostnamesFromAnnotations(httpProxy.Annotations) + for _, hostname := range hostnameList { + endpoints = append(endpoints, endpointsForHostname(hostname, targets, ttl, providerSpecific, setIdentifier)...) + } + } + + return endpoints, nil +} + +func (sc *httpProxySource) AddEventHandler(ctx context.Context, handler func()) { + log.Debug("Adding event handler for httpproxy") + + // Right now there is no way to remove event handler from informer, see: + // https://github.com/kubernetes/kubernetes/issues/79610 + sc.httpProxyInformer.Informer().AddEventHandler( + cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + handler() + }, + UpdateFunc: func(old interface{}, new interface{}) { + handler() + }, + DeleteFunc: func(obj interface{}) { + handler() + }, + }, + ) +} diff --git a/source/httpproxy_test.go b/source/httpproxy_test.go new file mode 100644 index 000000000..8e0d5da9d --- /dev/null +++ b/source/httpproxy_test.go @@ -0,0 +1,1081 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package source + +import ( + "context" + v1 "k8s.io/api/core/v1" + "testing" + + "github.com/pkg/errors" + projectcontour "github.com/projectcontour/contour/apis/projectcontour/v1" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/external-dns/endpoint" +) + +// This is a compile-time validation that httpProxySource is a Source. 
+var _ Source = &httpProxySource{} + +type HTTPProxySuite struct { + suite.Suite + source Source + httpProxy *projectcontour.HTTPProxy +} + +func (suite *HTTPProxySuite) SetupTest() { + fakeDynamicClient, s := newDynamicKubernetesClient() + var err error + + suite.source, err = NewContourHTTPProxySource( + fakeDynamicClient, + "default", + "", + "{{.Name}}", + false, + false, + ) + suite.NoError(err, "should initialize httpproxy source") + + suite.httpProxy = (fakeHTTPProxy{ + name: "foo-httpproxy-with-targets", + namespace: "default", + host: "example.com", + }).HTTPProxy() + + // Convert to unstructured + unstructuredHTTPProxy, err := convertHTTPProxyToUnstructured(suite.httpProxy, s) + if err != nil { + suite.Error(err) + } + + _, err = fakeDynamicClient.Resource(projectcontour.HTTPProxyGVR).Namespace(suite.httpProxy.Namespace).Create(context.Background(), unstructuredHTTPProxy, metav1.CreateOptions{}) + suite.NoError(err, "should succeed") +} + +func (suite *HTTPProxySuite) TestResourceLabelIsSet() { + endpoints, _ := suite.source.Endpoints(context.Background()) + for _, ep := range endpoints { + suite.Equal("httpproxy/default/foo-httpproxy-with-targets", ep.Labels[endpoint.ResourceLabelKey], "should set correct resource label") + } +} + +func convertHTTPProxyToUnstructured(hp *projectcontour.HTTPProxy, s *runtime.Scheme) (*unstructured.Unstructured, error) { + unstructuredHTTPProxy := &unstructured.Unstructured{} + if err := s.Convert(hp, unstructuredHTTPProxy, context.Background()); err != nil { + return nil, err + } + return unstructuredHTTPProxy, nil +} + +func TestHTTPProxy(t *testing.T) { + suite.Run(t, new(HTTPProxySuite)) + t.Run("endpointsFromHTTPProxy", testEndpointsFromHTTPProxy) + t.Run("Endpoints", testHTTPProxyEndpoints) +} + +func TestNewContourHTTPProxySource(t *testing.T) { + for _, ti := range []struct { + title string + annotationFilter string + fqdnTemplate string + combineFQDNAndAnnotation bool + expectError bool + }{ + { + title: "invalid template", + expectError: true, + fqdnTemplate: "{{.Name", + }, + { + title: "valid empty template", + expectError: false, + }, + { + title: "valid template", + expectError: false, + fqdnTemplate: "{{.Name}}-{{.Namespace}}.ext-dns.test.com", + }, + { + title: "valid template", + expectError: false, + fqdnTemplate: "{{.Name}}-{{.Namespace}}.ext-dns.test.com, {{.Name}}-{{.Namespace}}.ext-dna.test.com", + }, + { + title: "valid template", + expectError: false, + fqdnTemplate: "{{.Name}}-{{.Namespace}}.ext-dns.test.com, {{.Name}}-{{.Namespace}}.ext-dna.test.com", + combineFQDNAndAnnotation: true, + }, + { + title: "non-empty annotation filter label", + expectError: false, + annotationFilter: "contour.heptio.com/ingress.class=contour", + }, + } { + t.Run(ti.title, func(t *testing.T) { + fakeDynamicClient, _ := newDynamicKubernetesClient() + + _, err := NewContourHTTPProxySource( + fakeDynamicClient, + "", + ti.annotationFilter, + ti.fqdnTemplate, + ti.combineFQDNAndAnnotation, + false, + ) + if ti.expectError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} + +func testEndpointsFromHTTPProxy(t *testing.T) { + for _, ti := range []struct { + title string + httpProxy fakeHTTPProxy + expected []*endpoint.Endpoint + }{ + { + title: "one rule.host one lb.hostname", + httpProxy: fakeHTTPProxy{ + host: "foo.bar", // Kubernetes requires removal of trailing dot + loadBalancer: fakeLoadBalancerService{ + hostnames: []string{"lb.com"}, // Kubernetes omits the trailing dot + }, + }, + expected: []*endpoint.Endpoint{ + 
{ + DNSName: "foo.bar", + Targets: endpoint.Targets{"lb.com"}, + }, + }, + }, + { + title: "one rule.host one lb.IP", + httpProxy: fakeHTTPProxy{ + host: "foo.bar", + loadBalancer: fakeLoadBalancerService{ + ips: []string{"8.8.8.8"}, + }, + }, + expected: []*endpoint.Endpoint{ + { + DNSName: "foo.bar", + Targets: endpoint.Targets{"8.8.8.8"}, + }, + }, + }, + { + title: "one rule.host two lb.IP and two lb.Hostname", + httpProxy: fakeHTTPProxy{ + host: "foo.bar", + loadBalancer: fakeLoadBalancerService{ + ips: []string{"8.8.8.8", "127.0.0.1"}, + hostnames: []string{"elb.com", "alb.com"}, + }, + }, + expected: []*endpoint.Endpoint{ + { + DNSName: "foo.bar", + Targets: endpoint.Targets{"8.8.8.8", "127.0.0.1"}, + }, + { + DNSName: "foo.bar", + Targets: endpoint.Targets{"elb.com", "alb.com"}, + }, + }, + }, + { + title: "no rule.host", + httpProxy: fakeHTTPProxy{}, + expected: []*endpoint.Endpoint{}, + }, + { + title: "one rule.host invalid httpproxy", + httpProxy: fakeHTTPProxy{ + host: "foo.bar", + invalid: true, + }, + expected: []*endpoint.Endpoint{}, + }, + { + title: "no targets", + httpProxy: fakeHTTPProxy{}, + expected: []*endpoint.Endpoint{}, + }, + { + title: "delegate httpproxy", + httpProxy: fakeHTTPProxy{ + delegate: true, + }, + expected: []*endpoint.Endpoint{}, + }, + } { + t.Run(ti.title, func(t *testing.T) { + if source, err := newTestHTTPProxySource(); err != nil { + require.NoError(t, err) + } else if endpoints, err := source.endpointsFromHTTPProxy(ti.httpProxy.HTTPProxy()); err != nil { + require.NoError(t, err) + } else { + validateEndpoints(t, endpoints, ti.expected) + } + }) + } +} + +func testHTTPProxyEndpoints(t *testing.T) { + namespace := "testing" + for _, ti := range []struct { + title string + targetNamespace string + annotationFilter string + loadBalancer fakeLoadBalancerService + httpProxyItems []fakeHTTPProxy + expected []*endpoint.Endpoint + expectError bool + fqdnTemplate string + combineFQDNAndAnnotation bool + ignoreHostnameAnnotation bool + }{ + { + title: "no httpproxy", + targetNamespace: "", + }, + { + title: "two simple httpproxys", + targetNamespace: "", + loadBalancer: fakeLoadBalancerService{ + ips: []string{"8.8.8.8"}, + hostnames: []string{"lb.com"}, + }, + httpProxyItems: []fakeHTTPProxy{ + { + name: "fake1", + namespace: namespace, + host: "example.org", + }, + { + name: "fake2", + namespace: namespace, + host: "new.org", + }, + }, + expected: []*endpoint.Endpoint{ + { + DNSName: "example.org", + Targets: endpoint.Targets{"8.8.8.8"}, + }, + { + DNSName: "example.org", + Targets: endpoint.Targets{"lb.com"}, + }, + { + DNSName: "new.org", + Targets: endpoint.Targets{"8.8.8.8"}, + }, + { + DNSName: "new.org", + Targets: endpoint.Targets{"lb.com"}, + }, + }, + }, + { + title: "two simple httpproxys on different namespaces", + targetNamespace: "", + loadBalancer: fakeLoadBalancerService{ + ips: []string{"8.8.8.8"}, + hostnames: []string{"lb.com"}, + }, + httpProxyItems: []fakeHTTPProxy{ + { + name: "fake1", + namespace: "testing1", + host: "example.org", + }, + { + name: "fake2", + namespace: "testing2", + host: "new.org", + }, + }, + expected: []*endpoint.Endpoint{ + { + DNSName: "example.org", + Targets: endpoint.Targets{"8.8.8.8"}, + }, + { + DNSName: "example.org", + Targets: endpoint.Targets{"lb.com"}, + }, + { + DNSName: "new.org", + Targets: endpoint.Targets{"8.8.8.8"}, + }, + { + DNSName: "new.org", + Targets: endpoint.Targets{"lb.com"}, + }, + }, + }, + { + title: "two simple httpproxys on different namespaces and a target namespace", + 
targetNamespace: "testing1", + loadBalancer: fakeLoadBalancerService{ + ips: []string{"8.8.8.8"}, + hostnames: []string{"lb.com"}, + }, + httpProxyItems: []fakeHTTPProxy{ + { + name: "fake1", + namespace: "testing1", + host: "example.org", + }, + { + name: "fake2", + namespace: "testing2", + host: "new.org", + }, + }, + expected: []*endpoint.Endpoint{ + { + DNSName: "example.org", + Targets: endpoint.Targets{"8.8.8.8"}, + }, + { + DNSName: "example.org", + Targets: endpoint.Targets{"lb.com"}, + }, + }, + }, + { + title: "valid matching annotation filter expression", + targetNamespace: "", + annotationFilter: "contour.heptio.com/ingress.class in (alb, contour)", + loadBalancer: fakeLoadBalancerService{ + ips: []string{"8.8.8.8"}, + }, + httpProxyItems: []fakeHTTPProxy{ + { + name: "fake1", + namespace: namespace, + annotations: map[string]string{ + "contour.heptio.com/ingress.class": "contour", + }, + host: "example.org", + }, + }, + expected: []*endpoint.Endpoint{ + { + DNSName: "example.org", + Targets: endpoint.Targets{"8.8.8.8"}, + }, + }, + }, + { + title: "valid non-matching annotation filter expression", + targetNamespace: "", + annotationFilter: "contour.heptio.com/ingress.class in (alb, contour)", + loadBalancer: fakeLoadBalancerService{ + ips: []string{"8.8.8.8"}, + }, + httpProxyItems: []fakeHTTPProxy{ + { + name: "fake1", + namespace: namespace, + annotations: map[string]string{ + "contour.heptio.com/ingress.class": "tectonic", + }, + host: "example.org", + }, + }, + expected: []*endpoint.Endpoint{}, + }, + { + title: "invalid annotation filter expression", + targetNamespace: "", + annotationFilter: "contour.heptio.com/ingress.name in (a b)", + loadBalancer: fakeLoadBalancerService{ + ips: []string{"8.8.8.8"}, + }, + httpProxyItems: []fakeHTTPProxy{ + { + name: "fake1", + namespace: namespace, + annotations: map[string]string{ + "contour.heptio.com/ingress.class": "alb", + }, + host: "example.org", + }, + }, + expected: []*endpoint.Endpoint{}, + expectError: true, + }, + { + title: "valid matching annotation filter label", + targetNamespace: "", + annotationFilter: "contour.heptio.com/ingress.class=contour", + loadBalancer: fakeLoadBalancerService{ + ips: []string{"8.8.8.8"}, + }, + httpProxyItems: []fakeHTTPProxy{ + { + name: "fake1", + namespace: namespace, + annotations: map[string]string{ + "contour.heptio.com/ingress.class": "contour", + }, + host: "example.org", + }, + }, + expected: []*endpoint.Endpoint{ + { + DNSName: "example.org", + Targets: endpoint.Targets{"8.8.8.8"}, + }, + }, + }, + { + title: "valid non-matching annotation filter label", + targetNamespace: "", + annotationFilter: "contour.heptio.com/ingress.class=contour", + loadBalancer: fakeLoadBalancerService{ + ips: []string{"8.8.8.8"}, + }, + httpProxyItems: []fakeHTTPProxy{ + { + name: "fake1", + namespace: namespace, + annotations: map[string]string{ + "contour.heptio.com/ingress.class": "alb", + }, + host: "example.org", + }, + }, + expected: []*endpoint.Endpoint{}, + }, + { + title: "our controller type is dns-controller", + targetNamespace: "", + loadBalancer: fakeLoadBalancerService{ + ips: []string{"8.8.8.8"}, + }, + httpProxyItems: []fakeHTTPProxy{ + { + name: "fake1", + namespace: namespace, + annotations: map[string]string{ + controllerAnnotationKey: controllerAnnotationValue, + }, + host: "example.org", + }, + }, + expected: []*endpoint.Endpoint{ + { + DNSName: "example.org", + Targets: endpoint.Targets{"8.8.8.8"}, + }, + }, + }, + { + title: "different controller types are ignored", + 
targetNamespace: "", + loadBalancer: fakeLoadBalancerService{ + ips: []string{"8.8.8.8"}, + }, + httpProxyItems: []fakeHTTPProxy{ + { + name: "fake1", + namespace: namespace, + annotations: map[string]string{ + controllerAnnotationKey: "some-other-tool", + }, + host: "example.org", + }, + }, + expected: []*endpoint.Endpoint{}, + }, + { + title: "template for httpproxy if host is missing", + targetNamespace: "", + loadBalancer: fakeLoadBalancerService{ + ips: []string{"8.8.8.8"}, + hostnames: []string{"elb.com"}, + }, + httpProxyItems: []fakeHTTPProxy{ + { + name: "fake1", + namespace: namespace, + annotations: map[string]string{ + controllerAnnotationKey: controllerAnnotationValue, + }, + host: "", + }, + }, + expected: []*endpoint.Endpoint{ + { + DNSName: "fake1.ext-dns.test.com", + Targets: endpoint.Targets{"8.8.8.8"}, + }, + { + DNSName: "fake1.ext-dns.test.com", + Targets: endpoint.Targets{"elb.com"}, + }, + }, + fqdnTemplate: "{{.Name}}.ext-dns.test.com", + }, + { + title: "another controller annotation skipped even with template", + targetNamespace: "", + loadBalancer: fakeLoadBalancerService{ + ips: []string{"8.8.8.8"}, + }, + httpProxyItems: []fakeHTTPProxy{ + { + name: "fake1", + namespace: namespace, + annotations: map[string]string{ + controllerAnnotationKey: "other-controller", + }, + host: "", + }, + }, + expected: []*endpoint.Endpoint{}, + fqdnTemplate: "{{.Name}}.ext-dns.test.com", + }, + { + title: "multiple FQDN template hostnames", + targetNamespace: "", + loadBalancer: fakeLoadBalancerService{ + ips: []string{"8.8.8.8"}, + }, + httpProxyItems: []fakeHTTPProxy{ + { + name: "fake1", + namespace: namespace, + annotations: map[string]string{}, + host: "", + }, + }, + expected: []*endpoint.Endpoint{ + { + DNSName: "fake1.ext-dns.test.com", + Targets: endpoint.Targets{"8.8.8.8"}, + RecordType: endpoint.RecordTypeA, + }, + { + DNSName: "fake1.ext-dna.test.com", + Targets: endpoint.Targets{"8.8.8.8"}, + RecordType: endpoint.RecordTypeA, + }, + }, + fqdnTemplate: "{{.Name}}.ext-dns.test.com, {{.Name}}.ext-dna.test.com", + }, + { + title: "multiple FQDN template hostnames", + targetNamespace: "", + loadBalancer: fakeLoadBalancerService{ + ips: []string{"8.8.8.8"}, + }, + httpProxyItems: []fakeHTTPProxy{ + { + name: "fake1", + namespace: namespace, + annotations: map[string]string{}, + host: "", + }, + { + name: "fake2", + namespace: namespace, + annotations: map[string]string{ + targetAnnotationKey: "httpproxy-target.com", + }, + host: "example.org", + }, + }, + expected: []*endpoint.Endpoint{ + { + DNSName: "fake1.ext-dns.test.com", + Targets: endpoint.Targets{"8.8.8.8"}, + RecordType: endpoint.RecordTypeA, + }, + { + DNSName: "fake1.ext-dna.test.com", + Targets: endpoint.Targets{"8.8.8.8"}, + RecordType: endpoint.RecordTypeA, + }, + { + DNSName: "example.org", + Targets: endpoint.Targets{"httpproxy-target.com"}, + RecordType: endpoint.RecordTypeCNAME, + }, + { + DNSName: "fake2.ext-dns.test.com", + Targets: endpoint.Targets{"httpproxy-target.com"}, + RecordType: endpoint.RecordTypeCNAME, + }, + { + DNSName: "fake2.ext-dna.test.com", + Targets: endpoint.Targets{"httpproxy-target.com"}, + RecordType: endpoint.RecordTypeCNAME, + }, + }, + fqdnTemplate: "{{.Name}}.ext-dns.test.com, {{.Name}}.ext-dna.test.com", + combineFQDNAndAnnotation: true, + }, + { + title: "httpproxy rules with annotation", + targetNamespace: "", + loadBalancer: fakeLoadBalancerService{ + ips: []string{"8.8.8.8"}, + }, + httpProxyItems: []fakeHTTPProxy{ + { + name: "fake1", + namespace: namespace, + 
annotations: map[string]string{ + targetAnnotationKey: "httpproxy-target.com", + }, + host: "example.org", + }, + { + name: "fake2", + namespace: namespace, + annotations: map[string]string{ + targetAnnotationKey: "httpproxy-target.com", + }, + host: "example2.org", + }, + { + name: "fake3", + namespace: namespace, + annotations: map[string]string{ + targetAnnotationKey: "1.2.3.4", + }, + host: "example3.org", + }, + }, + expected: []*endpoint.Endpoint{ + { + DNSName: "example.org", + Targets: endpoint.Targets{"httpproxy-target.com"}, + RecordType: endpoint.RecordTypeCNAME, + }, + { + DNSName: "example2.org", + Targets: endpoint.Targets{"httpproxy-target.com"}, + RecordType: endpoint.RecordTypeCNAME, + }, + { + DNSName: "example3.org", + Targets: endpoint.Targets{"1.2.3.4"}, + RecordType: endpoint.RecordTypeA, + }, + }, + }, + { + title: "httpproxy rules with hostname annotation", + targetNamespace: "", + loadBalancer: fakeLoadBalancerService{ + ips: []string{"1.2.3.4"}, + }, + httpProxyItems: []fakeHTTPProxy{ + { + name: "fake1", + namespace: namespace, + annotations: map[string]string{ + hostnameAnnotationKey: "dns-through-hostname.com", + }, + host: "example.org", + }, + }, + expected: []*endpoint.Endpoint{ + { + DNSName: "example.org", + Targets: endpoint.Targets{"1.2.3.4"}, + RecordType: endpoint.RecordTypeA, + }, + { + DNSName: "dns-through-hostname.com", + Targets: endpoint.Targets{"1.2.3.4"}, + RecordType: endpoint.RecordTypeA, + }, + }, + }, + { + title: "httpproxy rules with hostname annotation having multiple hostnames", + targetNamespace: "", + loadBalancer: fakeLoadBalancerService{ + ips: []string{"1.2.3.4"}, + }, + httpProxyItems: []fakeHTTPProxy{ + { + name: "fake1", + namespace: namespace, + annotations: map[string]string{ + hostnameAnnotationKey: "dns-through-hostname.com, another-dns-through-hostname.com", + }, + host: "example.org", + }, + }, + expected: []*endpoint.Endpoint{ + { + DNSName: "example.org", + Targets: endpoint.Targets{"1.2.3.4"}, + RecordType: endpoint.RecordTypeA, + }, + { + DNSName: "dns-through-hostname.com", + Targets: endpoint.Targets{"1.2.3.4"}, + RecordType: endpoint.RecordTypeA, + }, + { + DNSName: "another-dns-through-hostname.com", + Targets: endpoint.Targets{"1.2.3.4"}, + RecordType: endpoint.RecordTypeA, + }, + }, + }, + { + title: "httpproxy rules with hostname and target annotation", + targetNamespace: "", + loadBalancer: fakeLoadBalancerService{ + ips: []string{}, + }, + httpProxyItems: []fakeHTTPProxy{ + { + name: "fake1", + namespace: namespace, + annotations: map[string]string{ + hostnameAnnotationKey: "dns-through-hostname.com", + targetAnnotationKey: "httpproxy-target.com", + }, + host: "example.org", + }, + }, + expected: []*endpoint.Endpoint{ + { + DNSName: "example.org", + Targets: endpoint.Targets{"httpproxy-target.com"}, + RecordType: endpoint.RecordTypeCNAME, + }, + { + DNSName: "dns-through-hostname.com", + Targets: endpoint.Targets{"httpproxy-target.com"}, + RecordType: endpoint.RecordTypeCNAME, + }, + }, + }, + { + title: "httpproxy rules with annotation and custom TTL", + targetNamespace: "", + loadBalancer: fakeLoadBalancerService{ + ips: []string{"8.8.8.8"}, + }, + httpProxyItems: []fakeHTTPProxy{ + { + name: "fake1", + namespace: namespace, + annotations: map[string]string{ + targetAnnotationKey: "httpproxy-target.com", + ttlAnnotationKey: "6", + }, + host: "example.org", + }, + { + name: "fake2", + namespace: namespace, + annotations: map[string]string{ + targetAnnotationKey: "httpproxy-target.com", + ttlAnnotationKey: 
"1", + }, + host: "example2.org", + }, + { + name: "fake3", + namespace: namespace, + annotations: map[string]string{ + targetAnnotationKey: "httpproxy-target.com", + ttlAnnotationKey: "10s", + }, + host: "example3.org", + }, + }, + expected: []*endpoint.Endpoint{ + { + DNSName: "example.org", + Targets: endpoint.Targets{"httpproxy-target.com"}, + RecordTTL: endpoint.TTL(6), + }, + { + DNSName: "example2.org", + Targets: endpoint.Targets{"httpproxy-target.com"}, + RecordTTL: endpoint.TTL(1), + }, + { + DNSName: "example3.org", + Targets: endpoint.Targets{"httpproxy-target.com"}, + RecordTTL: endpoint.TTL(10), + }, + }, + }, + { + title: "template for httpproxy with annotation", + targetNamespace: "", + loadBalancer: fakeLoadBalancerService{ + ips: []string{}, + hostnames: []string{}, + }, + httpProxyItems: []fakeHTTPProxy{ + { + name: "fake1", + namespace: namespace, + annotations: map[string]string{ + targetAnnotationKey: "httpproxy-target.com", + }, + host: "", + }, + { + name: "fake2", + namespace: namespace, + annotations: map[string]string{ + targetAnnotationKey: "httpproxy-target.com", + }, + host: "", + }, + { + name: "fake3", + namespace: namespace, + annotations: map[string]string{ + targetAnnotationKey: "1.2.3.4", + }, + host: "", + }, + }, + expected: []*endpoint.Endpoint{ + { + DNSName: "fake1.ext-dns.test.com", + Targets: endpoint.Targets{"httpproxy-target.com"}, + RecordType: endpoint.RecordTypeCNAME, + }, + { + DNSName: "fake2.ext-dns.test.com", + Targets: endpoint.Targets{"httpproxy-target.com"}, + RecordType: endpoint.RecordTypeCNAME, + }, + { + DNSName: "fake3.ext-dns.test.com", + Targets: endpoint.Targets{"1.2.3.4"}, + RecordType: endpoint.RecordTypeA, + }, + }, + fqdnTemplate: "{{.Name}}.ext-dns.test.com", + }, + { + title: "httpproxy with empty annotation", + targetNamespace: "", + loadBalancer: fakeLoadBalancerService{ + ips: []string{}, + hostnames: []string{}, + }, + httpProxyItems: []fakeHTTPProxy{ + { + name: "fake1", + namespace: namespace, + annotations: map[string]string{ + targetAnnotationKey: "", + }, + host: "", + }, + }, + expected: []*endpoint.Endpoint{}, + fqdnTemplate: "{{.Name}}.ext-dns.test.com", + }, + { + title: "ignore hostname annotations", + targetNamespace: "", + loadBalancer: fakeLoadBalancerService{ + ips: []string{"8.8.8.8"}, + hostnames: []string{"lb.com"}, + }, + httpProxyItems: []fakeHTTPProxy{ + { + name: "fake1", + namespace: namespace, + annotations: map[string]string{ + hostnameAnnotationKey: "ignore.me", + }, + host: "example.org", + }, + { + name: "fake2", + namespace: namespace, + annotations: map[string]string{ + hostnameAnnotationKey: "ignore.me.too", + }, + host: "new.org", + }, + }, + expected: []*endpoint.Endpoint{ + { + DNSName: "example.org", + Targets: endpoint.Targets{"8.8.8.8"}, + }, + { + DNSName: "example.org", + Targets: endpoint.Targets{"lb.com"}, + }, + { + DNSName: "new.org", + Targets: endpoint.Targets{"8.8.8.8"}, + }, + { + DNSName: "new.org", + Targets: endpoint.Targets{"lb.com"}, + }, + }, + ignoreHostnameAnnotation: true, + }, + } { + t.Run(ti.title, func(t *testing.T) { + httpProxies := make([]*projectcontour.HTTPProxy, 0) + for _, item := range ti.httpProxyItems { + item.loadBalancer = ti.loadBalancer + httpProxies = append(httpProxies, item.HTTPProxy()) + } + + fakeDynamicClient, scheme := newDynamicKubernetesClient() + for _, httpProxy := range httpProxies { + converted, err := convertHTTPProxyToUnstructured(httpProxy, scheme) + require.NoError(t, err) + _, err = 
fakeDynamicClient.Resource(projectcontour.HTTPProxyGVR).Namespace(httpProxy.Namespace).Create(context.Background(), converted, metav1.CreateOptions{}) + require.NoError(t, err) + } + + httpProxySource, err := NewContourHTTPProxySource( + fakeDynamicClient, + ti.targetNamespace, + ti.annotationFilter, + ti.fqdnTemplate, + ti.combineFQDNAndAnnotation, + ti.ignoreHostnameAnnotation, + ) + require.NoError(t, err) + + res, err := httpProxySource.Endpoints(context.Background()) + if ti.expectError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + + validateEndpoints(t, res, ti.expected) + }) + } +} + +// httpproxy specific helper functions +func newTestHTTPProxySource() (*httpProxySource, error) { + fakeDynamicClient, _ := newDynamicKubernetesClient() + + src, err := NewContourHTTPProxySource( + fakeDynamicClient, + "default", + "", + "{{.Name}}", + false, + false, + ) + if err != nil { + return nil, err + } + + irsrc, ok := src.(*httpProxySource) + if !ok { + return nil, errors.New("underlying source type was not httpproxy") + } + + return irsrc, nil +} + +type fakeHTTPProxy struct { + namespace string + name string + annotations map[string]string + + host string + invalid bool + delegate bool + loadBalancer fakeLoadBalancerService +} + +func (ir fakeHTTPProxy) HTTPProxy() *projectcontour.HTTPProxy { + var status string + if ir.invalid { + status = "invalid" + } else { + status = "valid" + } + + var spec projectcontour.HTTPProxySpec + if ir.delegate { + spec = projectcontour.HTTPProxySpec{} + } else { + spec = projectcontour.HTTPProxySpec{ + VirtualHost: &projectcontour.VirtualHost{ + Fqdn: ir.host, + }, + } + } + + lb := v1.LoadBalancerStatus{ + Ingress: []v1.LoadBalancerIngress{}, + } + + for _, ip := range ir.loadBalancer.ips { + lb.Ingress = append(lb.Ingress, v1.LoadBalancerIngress{ + IP: ip, + }) + } + for _, hostname := range ir.loadBalancer.hostnames { + lb.Ingress = append(lb.Ingress, v1.LoadBalancerIngress{ + Hostname: hostname, + }) + } + + httpProxy := &projectcontour.HTTPProxy{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: ir.namespace, + Name: ir.name, + Annotations: ir.annotations, + }, + Spec: spec, + Status: projectcontour.Status{ + CurrentStatus: status, + LoadBalancer: lb, + }, + } + + return httpProxy +} diff --git a/source/ingressroute.go b/source/ingressroute.go index 26be4cc06..1301f6559 100644 --- a/source/ingressroute.go +++ b/source/ingressroute.go @@ -26,18 +26,16 @@ import ( "time" "github.com/pkg/errors" - contourapi "github.com/projectcontour/contour/apis/contour/v1beta1" + contour "github.com/projectcontour/contour/apis/contour/v1beta1" log "github.com/sirupsen/logrus" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/dynamic" "k8s.io/client-go/dynamic/dynamicinformer" "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes" - "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/tools/cache" "sigs.k8s.io/external-dns/endpoint" @@ -90,7 +88,7 @@ func NewContourIngressRouteSource( // Use shared informer to listen for add/update/delete of ingressroutes in the specified namespace. // Set resync period to 0, to prevent processing when nothing has changed. 
informerFactory := dynamicinformer.NewFilteredDynamicSharedInformerFactory(dynamicKubeClient, 0, namespace, nil) - ingressRouteInformer := informerFactory.ForResource(contourapi.IngressRouteGVR) + ingressRouteInformer := informerFactory.ForResource(contour.IngressRouteGVR) // Add default resource event handlers to properly initialize informer. ingressRouteInformer.Informer().AddEventHandler( @@ -138,15 +136,15 @@ func (sc *ingressRouteSource) Endpoints(ctx context.Context) ([]*endpoint.Endpoi return nil, err } - // Convert to []*contourapi.IngressRoute - var ingressRoutes []*contourapi.IngressRoute + // Convert to []*contour.IngressRoute + var ingressRoutes []*contour.IngressRoute for _, ir := range irs { unstrucuredIR, ok := ir.(*unstructured.Unstructured) if !ok { return nil, errors.New("could not convert") } - irConverted := &contourapi.IngressRoute{} + irConverted := &contour.IngressRoute{} err := sc.unstructuredConverter.scheme.Convert(unstrucuredIR, irConverted, nil) if err != nil { return nil, err @@ -209,7 +207,7 @@ func (sc *ingressRouteSource) Endpoints(ctx context.Context) ([]*endpoint.Endpoi return endpoints, nil } -func (sc *ingressRouteSource) endpointsFromTemplate(ctx context.Context, ingressRoute *contourapi.IngressRoute) ([]*endpoint.Endpoint, error) { +func (sc *ingressRouteSource) endpointsFromTemplate(ctx context.Context, ingressRoute *contour.IngressRoute) ([]*endpoint.Endpoint, error) { // Process the whole template string var buf bytes.Buffer err := sc.fqdnTemplate.Execute(&buf, ingressRoute) @@ -246,7 +244,7 @@ func (sc *ingressRouteSource) endpointsFromTemplate(ctx context.Context, ingress } // filterByAnnotations filters a list of configs by a given annotation selector. -func (sc *ingressRouteSource) filterByAnnotations(ingressRoutes []*contourapi.IngressRoute) ([]*contourapi.IngressRoute, error) { +func (sc *ingressRouteSource) filterByAnnotations(ingressRoutes []*contour.IngressRoute) ([]*contour.IngressRoute, error) { labelSelector, err := metav1.ParseToLabelSelector(sc.annotationFilter) if err != nil { return nil, err @@ -261,7 +259,7 @@ func (sc *ingressRouteSource) filterByAnnotations(ingressRoutes []*contourapi.In return ingressRoutes, nil } - filteredList := []*contourapi.IngressRoute{} + filteredList := []*contour.IngressRoute{} for _, ingressRoute := range ingressRoutes { // convert the ingressroute's annotations to an equivalent label selector @@ -276,7 +274,7 @@ func (sc *ingressRouteSource) filterByAnnotations(ingressRoutes []*contourapi.In return filteredList, nil } -func (sc *ingressRouteSource) setResourceLabel(ingressRoute *contourapi.IngressRoute, endpoints []*endpoint.Endpoint) { +func (sc *ingressRouteSource) setResourceLabel(ingressRoute *contour.IngressRoute, endpoints []*endpoint.Endpoint) { for _, ep := range endpoints { ep.Labels[endpoint.ResourceLabelKey] = fmt.Sprintf("ingressroute/%s/%s", ingressRoute.Namespace, ingressRoute.Name) } @@ -304,7 +302,7 @@ func (sc *ingressRouteSource) targetsFromContourLoadBalancer(ctx context.Context } // endpointsFromIngressRouteConfig extracts the endpoints from a Contour IngressRoute object -func (sc *ingressRouteSource) endpointsFromIngressRoute(ctx context.Context, ingressRoute *contourapi.IngressRoute) ([]*endpoint.Endpoint, error) { +func (sc *ingressRouteSource) endpointsFromIngressRoute(ctx context.Context, ingressRoute *contour.IngressRoute) ([]*endpoint.Endpoint, error) { if ingressRoute.CurrentStatus != "valid" { log.Warn(errors.Errorf("cannot generate endpoints for ingressroute with status 
%s", ingressRoute.CurrentStatus)) return nil, nil @@ -358,26 +356,3 @@ func parseContourLoadBalancerService(service string) (namespace, name string, er func (sc *ingressRouteSource) AddEventHandler(ctx context.Context, handler func()) { } - -// UnstructuredConverter handles conversions between unstructured.Unstructured and Contour types -type UnstructuredConverter struct { - // scheme holds an initializer for converting Unstructured to a type - scheme *runtime.Scheme -} - -// NewUnstructuredConverter returns a new UnstructuredConverter initialized -func NewUnstructuredConverter() (*UnstructuredConverter, error) { - uc := &UnstructuredConverter{ - scheme: runtime.NewScheme(), - } - - // Setup converter to understand custom CRD types - contourapi.AddKnownTypes(uc.scheme) - - // Add the core types we need - if err := scheme.AddToScheme(uc.scheme); err != nil { - return nil, err - } - - return uc, nil -} diff --git a/source/ingressroute_test.go b/source/ingressroute_test.go index 984d84d10..8a18182c5 100644 --- a/source/ingressroute_test.go +++ b/source/ingressroute_test.go @@ -80,7 +80,7 @@ func (suite *IngressRouteSuite) SetupTest() { }).IngressRoute() // Convert to unstructured - unstructuredIngressRoute, err := convertToUnstructured(suite.ingressRoute, s) + unstructuredIngressRoute, err := convertIngressRouteToUnstructured(suite.ingressRoute, s) if err != nil { suite.Error(err) } @@ -98,11 +98,12 @@ func (suite *IngressRouteSuite) TestResourceLabelIsSet() { func newDynamicKubernetesClient() (*fakeDynamic.FakeDynamicClient, *runtime.Scheme) { s := runtime.NewScheme() - contour.AddKnownTypes(s) + _ = contour.AddToScheme(s) + _ = projectcontour.AddToScheme(s) return fakeDynamic.NewSimpleDynamicClient(s), s } -func convertToUnstructured(ir *contour.IngressRoute, s *runtime.Scheme) (*unstructured.Unstructured, error) { +func convertIngressRouteToUnstructured(ir *contour.IngressRoute, s *runtime.Scheme) (*unstructured.Unstructured, error) { unstructuredIngressRoute := &unstructured.Unstructured{} if err := s.Convert(ir, unstructuredIngressRoute, context.Background()); err != nil { return nil, err @@ -1013,7 +1014,7 @@ func testIngressRouteEndpoints(t *testing.T) { fakeDynamicClient, scheme := newDynamicKubernetesClient() for _, ingressRoute := range ingressRoutes { - converted, err := convertToUnstructured(ingressRoute, scheme) + converted, err := convertIngressRouteToUnstructured(ingressRoute, scheme) require.NoError(t, err) _, err = fakeDynamicClient.Resource(contour.IngressRouteGVR).Namespace(ingressRoute.Namespace).Create(context.Background(), converted, metav1.CreateOptions{}) require.NoError(t, err) diff --git a/source/store.go b/source/store.go index 3b7ba9742..c6cd064a2 100644 --- a/source/store.go +++ b/source/store.go @@ -221,6 +221,12 @@ func BuildWithConfig(source string, p ClientGenerator, cfg *Config) (Source, err return nil, err } return NewContourIngressRouteSource(dynamicClient, kubernetesClient, cfg.ContourLoadBalancerService, cfg.Namespace, cfg.AnnotationFilter, cfg.FQDNTemplate, cfg.CombineFQDNAndAnnotation, cfg.IgnoreHostnameAnnotation) + case "contour-httpproxy": + dynamicClient, err := p.DynamicKubernetesClient() + if err != nil { + return nil, err + } + return NewContourHTTPProxySource(dynamicClient, cfg.Namespace, cfg.AnnotationFilter, cfg.FQDNTemplate, cfg.CombineFQDNAndAnnotation, cfg.IgnoreHostnameAnnotation) case "openshift-route": ocpClient, err := p.OpenShiftClient() if err != nil { diff --git a/source/store_test.go b/source/store_test.go index 8b6792f7c..e47b29549 
100644 --- a/source/store_test.go +++ b/source/store_test.go @@ -96,9 +96,9 @@ func (suite *ByNamesTestSuite) TestAllInitialized() { mockClientGenerator.On("IstioClient").Return(NewFakeConfigStore(), nil) mockClientGenerator.On("DynamicKubernetesClient").Return(fakeDynamic, nil) - sources, err := ByNames(mockClientGenerator, []string{"service", "ingress", "istio-gateway", "contour-ingressroute", "fake"}, minimalConfig) + sources, err := ByNames(mockClientGenerator, []string{"service", "ingress", "istio-gateway", "contour-ingressroute", "contour-httpproxy", "fake"}, minimalConfig) suite.NoError(err, "should not generate errors") - suite.Len(sources, 5, "should generate all five sources") + suite.Len(sources, 6, "should generate all six sources") } func (suite *ByNamesTestSuite) TestOnlyFake() { @@ -148,6 +148,8 @@ func (suite *ByNamesTestSuite) TestIstioClientFails() { _, err = ByNames(mockClientGenerator, []string{"contour-ingressroute"}, minimalConfig) suite.Error(err, "should return an error if contour client cannot be created") + _, err = ByNames(mockClientGenerator, []string{"contour-httpproxy"}, minimalConfig) + suite.Error(err, "should return an error if contour client cannot be created") } func TestByNames(t *testing.T) { diff --git a/source/unstructured_converter.go b/source/unstructured_converter.go new file mode 100644 index 000000000..def5ce9c5 --- /dev/null +++ b/source/unstructured_converter.go @@ -0,0 +1,32 @@ +package source + +import ( + contour "github.com/projectcontour/contour/apis/contour/v1beta1" + projectcontour "github.com/projectcontour/contour/apis/projectcontour/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes/scheme" +) + +// UnstructuredConverter handles conversions between unstructured.Unstructured and Contour types +type UnstructuredConverter struct { + // scheme holds an initializer for converting Unstructured to a type + scheme *runtime.Scheme +} + +// NewUnstructuredConverter returns a new UnstructuredConverter initialized +func NewUnstructuredConverter() (*UnstructuredConverter, error) { + uc := &UnstructuredConverter{ + scheme: runtime.NewScheme(), + } + + // Setup converter to understand custom CRD types + _ = contour.AddToScheme(uc.scheme) + _ = projectcontour.AddToScheme(uc.scheme) + + // Add the core types we need + if err := scheme.AddToScheme(uc.scheme); err != nil { + return nil, err + } + + return uc, nil +} From 6a053460feeffae9fdb995e82d6064f42e86b8c3 Mon Sep 17 00:00:00 2001 From: Patrick D'Addona Date: Thu, 14 May 2020 21:29:04 +0200 Subject: [PATCH 05/46] Enable azure_private_dns to work with non "AzurePublicCloud" clouds, like "AzureUSGovernmentCloud", "AzureChinaCloud" or "AzureGermanCloud" --- provider/azure/azure_private_dns.go | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/provider/azure/azure_private_dns.go b/provider/azure/azure_private_dns.go index d07279d81..8205c56dc 100644 --- a/provider/azure/azure_private_dns.go +++ b/provider/azure/azure_private_dns.go @@ -65,9 +65,15 @@ func NewAzurePrivateDNSProvider(domainFilter endpoint.DomainFilter, zoneIDFilter return nil, err } - zonesClient := privatedns.NewPrivateZonesClient(subscriptionID) + settings, err := auth.GetSettingsFromEnvironment() + if err != nil { + return nil, err + } + + + zonesClient := privatedns.NewPrivateZonesClientWithBaseURI(settings.Environment.ResourceManagerEndpoint, subscriptionID) zonesClient.Authorizer = authorizer - recordSetsClient := privatedns.NewRecordSetsClient(subscriptionID) + recordSetsClient := 
privatedns.NewRecordSetsClientWithBaseURI(settings.Environment.ResourceManagerEndpoint, subscriptionID) recordSetsClient.Authorizer = authorizer provider := &AzurePrivateDNSProvider{ From 81f05761562e8f2c5bb0ff9aee56e336125ac186 Mon Sep 17 00:00:00 2001 From: Patrick D'Addona Date: Mon, 18 May 2020 13:36:59 +0200 Subject: [PATCH 06/46] Add test to verify NewAzurePrivateDNSProvider will use the ResourceManagerEndpoint from the configured cloud --- provider/azure/azure_privatedns_test.go | 37 +++++++++++++++++++++++-- 1 file changed, 35 insertions(+), 2 deletions(-) diff --git a/provider/azure/azure_privatedns_test.go b/provider/azure/azure_privatedns_test.go index c5396ef57..c0095e853 100644 --- a/provider/azure/azure_privatedns_test.go +++ b/provider/azure/azure_privatedns_test.go @@ -18,9 +18,12 @@ package azure import ( "context" - "testing" - "github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/azure/auth" + "github.com/stretchr/testify/assert" + "os" + "testing" "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/to" @@ -252,6 +255,36 @@ func newAzurePrivateDNSProvider(domainFilter endpoint.DomainFilter, zoneIDFilter } } +func validateAzurePrivateDNSClientsResourceManager(t *testing.T, environmentName string, expectedResourceManagerEndpoint string) { + err := os.Setenv(auth.EnvironmentName, environmentName) + if err != nil { + t.Fatal(err) + } + + azurePrivateDNSProvider, err := NewAzurePrivateDNSProvider(endpoint.NewDomainFilter([]string{"example.com"}), provider.NewZoneIDFilter([]string{""}), "k8s", "sub", true) + + if err != nil { + t.Fatal(err) + } + + zonesClientBaseURI := azurePrivateDNSProvider.zonesClient.(privatedns.PrivateZonesClient).BaseURI + recordSetsClientBaseURI := azurePrivateDNSProvider.recordSetsClient.(privatedns.RecordSetsClient).BaseURI + + assert.Equal(t, zonesClientBaseURI, expectedResourceManagerEndpoint, "expected and actual resource manager endpoints don't match. expected: %s, got: %s", expectedResourceManagerEndpoint, zonesClientBaseURI) + assert.Equal(t, recordSetsClientBaseURI, expectedResourceManagerEndpoint, "expected and actual resource manager endpoints don't match. 
expected: %s, got: %s", expectedResourceManagerEndpoint, recordSetsClientBaseURI) +} + +func TestNewAzurePrivateDNSProvider(t *testing.T) { + // make sure to reset the environment variables at the end again + originalEnv := os.Getenv(auth.EnvironmentName) + defer os.Setenv(auth.EnvironmentName, originalEnv) + + validateAzurePrivateDNSClientsResourceManager(t, "", azure.PublicCloud.ResourceManagerEndpoint) + validateAzurePrivateDNSClientsResourceManager(t, "AZURECHINACLOUD", azure.ChinaCloud.ResourceManagerEndpoint) + validateAzurePrivateDNSClientsResourceManager(t, "AZUREGERMANCLOUD", azure.GermanCloud.ResourceManagerEndpoint) + validateAzurePrivateDNSClientsResourceManager(t, "AZUREUSGOVERNMENTCLOUD", azure.USGovernmentCloud.ResourceManagerEndpoint) +} + func TestAzurePrivateDNSRecord(t *testing.T) { provider, err := newMockedAzurePrivateDNSProvider(endpoint.NewDomainFilter([]string{"example.com"}), provider.NewZoneIDFilter([]string{""}), true, "k8s", &[]privatedns.PrivateZone{ From 83d73646dc8b85601639c0f926c78d6c848861db Mon Sep 17 00:00:00 2001 From: Patrick D'Addona Date: Mon, 18 May 2020 15:30:24 +0200 Subject: [PATCH 07/46] Ran goimports on provider/azure/azure_private_dns.go and provider/azure/azure_privatedns_test.go --- provider/azure/azure_private_dns.go | 1 - provider/azure/azure_privatedns_test.go | 8 ++++---- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/provider/azure/azure_private_dns.go b/provider/azure/azure_private_dns.go index 8205c56dc..093e4c031 100644 --- a/provider/azure/azure_private_dns.go +++ b/provider/azure/azure_private_dns.go @@ -70,7 +70,6 @@ func NewAzurePrivateDNSProvider(domainFilter endpoint.DomainFilter, zoneIDFilter return nil, err } - zonesClient := privatedns.NewPrivateZonesClientWithBaseURI(settings.Environment.ResourceManagerEndpoint, subscriptionID) zonesClient.Authorizer = authorizer recordSetsClient := privatedns.NewRecordSetsClientWithBaseURI(settings.Environment.ResourceManagerEndpoint, subscriptionID) diff --git a/provider/azure/azure_privatedns_test.go b/provider/azure/azure_privatedns_test.go index c0095e853..1754493b2 100644 --- a/provider/azure/azure_privatedns_test.go +++ b/provider/azure/azure_privatedns_test.go @@ -18,15 +18,15 @@ package azure import ( "context" - "github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/azure/auth" - "github.com/stretchr/testify/assert" "os" "testing" + "github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns" "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/azure/auth" "github.com/Azure/go-autorest/autorest/to" + "github.com/stretchr/testify/assert" "sigs.k8s.io/external-dns/endpoint" "sigs.k8s.io/external-dns/plan" From 1fd40bd5fda60d0a2cc38b7bbde1b4a839e65c3e Mon Sep 17 00:00:00 2001 From: Joseph Glanville Date: Sat, 22 Aug 2020 17:49:30 +0700 Subject: [PATCH 08/46] Documentation for Contour HTTPProxy support --- docs/tutorials/contour.md | 65 +++++++++++++++++++++++++++++++-------- 1 file changed, 53 insertions(+), 12 deletions(-) diff --git a/docs/tutorials/contour.md b/docs/tutorials/contour.md index bea7911f4..874c0ef43 100644 --- a/docs/tutorials/contour.md +++ b/docs/tutorials/contour.md @@ -1,8 +1,13 @@ -# Configuring ExternalDNS to use the Contour IngressRoute Source -This tutorial describes how to configure ExternalDNS to use the Contour IngressRoute source. 
-It is meant to supplement the other provider-specific setup tutorials. +# Setting up External DNS with Contour + +This tutorial describes how to configure External DNS to use either the Contour `IngressRoute` or `HTTPProxy` source. +The `IngressRoute` CRD is deprecated but still in-use in many clusters however it's recommended that you migrate to the `HTTPProxy` resource. +Using the `HTTPProxy` resource with External DNS requires Contour version 1.5 or greater. + +### Example manifests for External DNS +#### Without RBAC +Note that you don't need to enable both of the sources and if you don't enable `contour-ingressroute` you also don't need to configure the `contour-load-balancer` setting. -### Manifest (for clusters without RBAC enabled) ```yaml apiVersion: apps/v1 kind: Deployment @@ -25,8 +30,9 @@ spec: args: - --source=service - --source=ingress - - --source=contour-ingressroute - - --contour-load-balancer=custom-contour-namespace/custom-contour-lb # load balancer service to be used. Omit to use the default (heptio-contour/contour) + - --source=contour-ingressroute # To enable IngressRoute support + - --source=contour-httpproxy # To enable HTTPProxy support + - --contour-load-balancer=custom-contour-namespace/custom-contour-lb # For IngressRoute ONLY: load balancer service to be used. Omit to use the default (heptio-contour/contour) - --domain-filter=external-dns-test.my-org.com # will make ExternalDNS see only the hosted zones matching provided domain, omit to process all available hosted zones - --provider=aws - --policy=upsert-only # would prevent ExternalDNS from deleting any records, omit to enable full synchronization @@ -35,7 +41,7 @@ spec: - --txt-owner-id=my-identifier ``` -### Manifest (for clusters with RBAC enabled) +#### With RBAC ```yaml apiVersion: v1 kind: ServiceAccount @@ -56,9 +62,14 @@ rules: - apiGroups: [""] resources: ["nodes"] verbs: ["list"] +# This section is only for IngressRoute - apiGroups: ["contour.heptio.com"] resources: ["ingressroutes"] verbs: ["get","watch","list"] +# This section is only for HTTPProxy +- apiGroups: ["projectcontour.io"] + resources: ["httpproxies"] + verbs: ["get","watch","list"] --- apiVersion: rbac.authorization.k8s.io/v1beta1 kind: ClusterRoleBinding @@ -95,8 +106,9 @@ spec: args: - --source=service - --source=ingress - - --source=contour-ingressroute - - --contour-load-balancer=custom-contour-namespace/custom-contour-lb # load balancer service to be used. Omit to use the default (heptio-contour/contour) + - --source=contour-ingressroute # To enable IngressRoute support + - --source=contour-httpproxy # To enable HTTPProxy support + - --contour-load-balancer=custom-contour-namespace/custom-contour-lb # For IngressRoute ONLY: load balancer service to be used. Omit to use the default (heptio-contour/contour) - --domain-filter=external-dns-test.my-org.com # will make ExternalDNS see only the hosted zones matching provided domain, omit to process all available hosted zones - --provider=aws - --policy=upsert-only # would prevent ExternalDNS from deleting any records, omit to enable full synchronization @@ -105,9 +117,9 @@ spec: - --txt-owner-id=my-identifier ``` -### Verify External DNS works (IngressRoute example) +### Verify External DNS works The following instructions are based on the -[Contour example workload](https://github.com/heptio/contour/blob/HEAD/examples/example-workload/kuard-ingressroute.yaml). +[Contour example workload](https://github.com/projectcontour/contour/tree/master/examples/example-workload/httpproxy). 
#### Install a sample service ```bash @@ -147,7 +159,36 @@ spec: app: kuard sessionAffinity: None type: ClusterIP ---- +EOF +``` + +Then create either a `HTTPProxy` or an `IngressRoute` + +#### HTTPProxy +``` +$ kubectl apply -f - < Date: Sat, 22 Aug 2020 18:37:26 +0700 Subject: [PATCH 09/46] Address review comments --- source/httpproxy.go | 14 +++++++------- source/unstructured_converter.go | 16 ++++++++++++++++ 2 files changed, 23 insertions(+), 7 deletions(-) diff --git a/source/httpproxy.go b/source/httpproxy.go index c4af473b6..ea705c937 100644 --- a/source/httpproxy.go +++ b/source/httpproxy.go @@ -97,12 +97,12 @@ func NewContourHTTPProxySource( return httpProxyInformer.Informer().HasSynced(), nil }) if err != nil { - return nil, fmt.Errorf("failed to sync cache: %v", err) + return nil, errors.Wrap(err, "failed to sync cache") } uc, err := NewUnstructuredConverter() if err != nil { - return nil, fmt.Errorf("failed to setup Unstructured Converter: %v", err) + return nil, errors.Wrap(err, "failed to setup Unstructured Converter") } return &httpProxySource{ @@ -136,14 +136,14 @@ func (sc *httpProxySource) Endpoints(ctx context.Context) ([]*endpoint.Endpoint, hpConverted := &projectcontour.HTTPProxy{} err := sc.unstructuredConverter.scheme.Convert(unstrucuredHP, hpConverted, nil) if err != nil { - return nil, err + return nil, errors.Wrap(err, "failed to convert to HTTPProxy") } httpProxies = append(httpProxies, hpConverted) } httpProxies, err = sc.filterByAnnotations(httpProxies) if err != nil { - return nil, err + return nil, errors.Wrap(err, "failed to filter HTTPProxies") } endpoints := []*endpoint.Endpoint{} @@ -162,14 +162,14 @@ func (sc *httpProxySource) Endpoints(ctx context.Context) ([]*endpoint.Endpoint, hpEndpoints, err := sc.endpointsFromHTTPProxy(hp) if err != nil { - return nil, err + return nil, errors.Wrap(err, "failed to get endpoints from HTTPProxy") } // apply template if fqdn is missing on HTTPProxy if (sc.combineFQDNAnnotation || len(hpEndpoints) == 0) && sc.fqdnTemplate != nil { tmplEndpoints, err := sc.endpointsFromTemplate(hp) if err != nil { - return nil, err + return nil, errors.Wrap(err, "failed to get endpoints from template") } if sc.combineFQDNAnnotation { @@ -201,7 +201,7 @@ func (sc *httpProxySource) endpointsFromTemplate(httpProxy *projectcontour.HTTPP var buf bytes.Buffer err := sc.fqdnTemplate.Execute(&buf, httpProxy) if err != nil { - return nil, fmt.Errorf("failed to apply template on HTTPProxy %s/%s: %v", httpProxy.Namespace, httpProxy.Name, err) + return nil, errors.Wrapf(err, "failed to apply template on HTTPProxy %s/%s", httpProxy.Namespace, httpProxy.Name) } hostnames := buf.String() diff --git a/source/unstructured_converter.go b/source/unstructured_converter.go index def5ce9c5..a87194e9f 100644 --- a/source/unstructured_converter.go +++ b/source/unstructured_converter.go @@ -1,3 +1,19 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + package source import ( From d6fc04341338d4c63385aa324349ed40802a2b0a Mon Sep 17 00:00:00 2001 From: Patrick D'Addona Date: Tue, 25 Aug 2020 10:34:11 +0200 Subject: [PATCH 10/46] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f3d2ea3ae..94294bb86 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,7 @@ - Improve errors context for AWS provider - Scaleway Provider (#1643) @Sh4d1 +- Enable azure_private_dns to work with non "AzurePublicCloud" clouds (#1578) @daddonpa - Fix typos in documentation @ddymko ## v0.7.3 - 2020-08-05 From 26156ad2db9de162346a5c7bf7de6e08bed1118f Mon Sep 17 00:00:00 2001 From: Loo Zheng Yuan Date: Wed, 26 Aug 2020 01:33:51 +0800 Subject: [PATCH 11/46] improve docs on the exact configuration needed for zone-limited api tokens to work --- docs/tutorials/cloudflare.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/docs/tutorials/cloudflare.md b/docs/tutorials/cloudflare.md index 27f004a70..6190f0c62 100644 --- a/docs/tutorials/cloudflare.md +++ b/docs/tutorials/cloudflare.md @@ -21,7 +21,9 @@ Snippet from [Cloudflare - Getting Started](https://api.cloudflare.com/#getting- API Token will be preferred for authentication if `CF_API_TOKEN` environment variable is set. Otherwise `CF_API_KEY` and `CF_API_EMAIL` should be set to run ExternalDNS with Cloudflare. -When using API Token authentication the token should be granted Zone `Read` and DNS `Edit` privileges. +When using API Token authentication, the token should be granted Zone `Read`, DNS `Edit` privileges, and access to `All zones`. + +If you would like to further restrict the API permissions to a specific zone (or zones), you also need to use the `--zone-id-filter` so that the underlying API requests only access the zones that you explicitly specify, as opposed to accessing all zones. ## Deploy ExternalDNS From 1518ae91f8041dedacab91abe08e598522b43ab2 Mon Sep 17 00:00:00 2001 From: Loo Zheng Yuan Date: Wed, 26 Aug 2020 01:34:14 +0800 Subject: [PATCH 12/46] add examples using the --zone-id-filter flag --- docs/tutorials/cloudflare.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/tutorials/cloudflare.md b/docs/tutorials/cloudflare.md index 6190f0c62..67b4741dd 100644 --- a/docs/tutorials/cloudflare.md +++ b/docs/tutorials/cloudflare.md @@ -54,6 +54,7 @@ spec: args: - --source=service # ingress is also possible - --domain-filter=example.com # (optional) limit to only example.com domains; change to match the zone created above. + - --zone-id-filter=023e105f4ecef8ad9ca31a8372d0c353 # (optional) limit to a specific zone. - --provider=cloudflare - --cloudflare-proxied # (optional) enable the proxy feature of Cloudflare (DDOS protection, CDN...) env: @@ -121,6 +122,7 @@ spec: args: - --source=service # ingress is also possible - --domain-filter=example.com # (optional) limit to only example.com domains; change to match the zone created above. + - --zone-id-filter=023e105f4ecef8ad9ca31a8372d0c353 # (optional) limit to a specific zone. - --provider=cloudflare - --cloudflare-proxied # (optional) enable the proxy feature of Cloudflare (DDOS protection, CDN...) 
env: From b70c30b711aaa3a95ad7a8c0d37df86cd0e23248 Mon Sep 17 00:00:00 2001 From: Loo Zheng Yuan Date: Wed, 26 Aug 2020 01:38:01 +0800 Subject: [PATCH 13/46] update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f3d2ea3ae..72cabe129 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,7 @@ - Improve errors context for AWS provider - Scaleway Provider (#1643) @Sh4d1 - Fix typos in documentation @ddymko +- Add Cloudflare documentation on use of `--zone-id-filter` (#1751) @loozhengyuan ## v0.7.3 - 2020-08-05 From 73be02b15590cd82fca40f243f0bd2eec114533f Mon Sep 17 00:00:00 2001 From: Chema Sanchez Date: Wed, 26 Aug 2020 16:47:15 +0200 Subject: [PATCH 14/46] Fix index out of range when hostname has no dots --- registry/txt.go | 3 +++ registry/txt_test.go | 9 +++++++++ 2 files changed, 12 insertions(+) diff --git a/registry/txt.go b/registry/txt.go index b4cb97a79..ebf402021 100644 --- a/registry/txt.go +++ b/registry/txt.go @@ -238,6 +238,9 @@ func (pr affixNameMapper) toEndpointName(txtDNSName string) string { func (pr affixNameMapper) toTXTName(endpointDNSName string) string { DNSName := strings.SplitN(endpointDNSName, ".", 2) + if len(DNSName) < 2 { + return pr.prefix + DNSName[0] + pr.suffix + } return pr.prefix + DNSName[0] + pr.suffix + "." + DNSName[1] } diff --git a/registry/txt_test.go b/registry/txt_test.go index 5157dd560..929ade1a4 100644 --- a/registry/txt_test.go +++ b/registry/txt_test.go @@ -400,6 +400,7 @@ func testTXTRegistryApplyChangesWithPrefix(t *testing.T) { Create: []*endpoint.Endpoint{ newEndpointWithOwnerResource("new-record-1.test-zone.example.org", "new-loadbalancer-1.lb.com", "", "", "ingress/default/my-ingress"), newEndpointWithOwnerResource("multiple.test-zone.example.org", "lb3.loadbalancer.com", "", "", "ingress/default/my-ingress").WithSetIdentifier("test-set-3"), + newEndpointWithOwnerResource("example", "new-loadbalancer-1.lb.com", "", "", "ingress/default/my-ingress"), }, Delete: []*endpoint.Endpoint{ newEndpointWithOwner("foobar.test-zone.example.org", "foobar.loadbalancer.com", endpoint.RecordTypeCNAME, "owner"), @@ -420,6 +421,8 @@ func testTXTRegistryApplyChangesWithPrefix(t *testing.T) { newEndpointWithOwner("txt.new-record-1.test-zone.example.org", "\"heritage=external-dns,external-dns/owner=owner,external-dns/resource=ingress/default/my-ingress\"", endpoint.RecordTypeTXT, ""), newEndpointWithOwnerResource("multiple.test-zone.example.org", "lb3.loadbalancer.com", "", "owner", "ingress/default/my-ingress").WithSetIdentifier("test-set-3"), newEndpointWithOwner("txt.multiple.test-zone.example.org", "\"heritage=external-dns,external-dns/owner=owner,external-dns/resource=ingress/default/my-ingress\"", endpoint.RecordTypeTXT, "").WithSetIdentifier("test-set-3"), + newEndpointWithOwnerResource("example", "new-loadbalancer-1.lb.com", "", "owner", "ingress/default/my-ingress"), + newEndpointWithOwner("txt.example", "\"heritage=external-dns,external-dns/owner=owner,external-dns/resource=ingress/default/my-ingress\"", endpoint.RecordTypeTXT, ""), }, Delete: []*endpoint.Endpoint{ newEndpointWithOwner("foobar.test-zone.example.org", "foobar.loadbalancer.com", endpoint.RecordTypeCNAME, "owner"), @@ -491,6 +494,7 @@ func testTXTRegistryApplyChangesWithSuffix(t *testing.T) { Create: []*endpoint.Endpoint{ newEndpointWithOwnerResource("new-record-1.test-zone.example.org", "new-loadbalancer-1.lb.com", "", "", "ingress/default/my-ingress"), newEndpointWithOwnerResource("multiple.test-zone.example.org", 
"lb3.loadbalancer.com", "", "", "ingress/default/my-ingress").WithSetIdentifier("test-set-3"), + newEndpointWithOwnerResource("example", "new-loadbalancer-1.lb.com", "", "", "ingress/default/my-ingress"), }, Delete: []*endpoint.Endpoint{ newEndpointWithOwner("foobar.test-zone.example.org", "foobar.loadbalancer.com", endpoint.RecordTypeCNAME, "owner"), @@ -511,6 +515,8 @@ func testTXTRegistryApplyChangesWithSuffix(t *testing.T) { newEndpointWithOwner("new-record-1-txt.test-zone.example.org", "\"heritage=external-dns,external-dns/owner=owner,external-dns/resource=ingress/default/my-ingress\"", endpoint.RecordTypeTXT, ""), newEndpointWithOwnerResource("multiple.test-zone.example.org", "lb3.loadbalancer.com", "", "owner", "ingress/default/my-ingress").WithSetIdentifier("test-set-3"), newEndpointWithOwner("multiple-txt.test-zone.example.org", "\"heritage=external-dns,external-dns/owner=owner,external-dns/resource=ingress/default/my-ingress\"", endpoint.RecordTypeTXT, "").WithSetIdentifier("test-set-3"), + newEndpointWithOwnerResource("example", "new-loadbalancer-1.lb.com", "", "owner", "ingress/default/my-ingress"), + newEndpointWithOwner("example-txt", "\"heritage=external-dns,external-dns/owner=owner,external-dns/resource=ingress/default/my-ingress\"", endpoint.RecordTypeTXT, ""), }, Delete: []*endpoint.Endpoint{ newEndpointWithOwner("foobar.test-zone.example.org", "foobar.loadbalancer.com", endpoint.RecordTypeCNAME, "owner"), @@ -577,6 +583,7 @@ func testTXTRegistryApplyChangesNoPrefix(t *testing.T) { changes := &plan.Changes{ Create: []*endpoint.Endpoint{ newEndpointWithOwner("new-record-1.test-zone.example.org", "new-loadbalancer-1.lb.com", endpoint.RecordTypeCNAME, ""), + newEndpointWithOwner("example", "new-loadbalancer-1.lb.com", endpoint.RecordTypeCNAME, ""), }, Delete: []*endpoint.Endpoint{ newEndpointWithOwner("foobar.test-zone.example.org", "foobar.loadbalancer.com", endpoint.RecordTypeCNAME, "owner"), @@ -592,6 +599,8 @@ func testTXTRegistryApplyChangesNoPrefix(t *testing.T) { Create: []*endpoint.Endpoint{ newEndpointWithOwner("new-record-1.test-zone.example.org", "new-loadbalancer-1.lb.com", endpoint.RecordTypeCNAME, "owner"), newEndpointWithOwner("new-record-1.test-zone.example.org", "\"heritage=external-dns,external-dns/owner=owner\"", endpoint.RecordTypeTXT, ""), + newEndpointWithOwner("example", "new-loadbalancer-1.lb.com", endpoint.RecordTypeCNAME, "owner"), + newEndpointWithOwner("example", "\"heritage=external-dns,external-dns/owner=owner\"", endpoint.RecordTypeTXT, ""), }, Delete: []*endpoint.Endpoint{ newEndpointWithOwner("foobar.test-zone.example.org", "foobar.loadbalancer.com", endpoint.RecordTypeCNAME, "owner"), From 8af060b9f8a4b5d30701ff66b4a96afcc9b730ec Mon Sep 17 00:00:00 2001 From: Chema Sanchez Date: Thu, 27 Aug 2020 09:22:02 +0200 Subject: [PATCH 15/46] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 33693d7ac..bed24df06 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ - Fix: alibaba cloud keeping create record (#1682) @LXM - Update all container registry references to use k8s.gcr.io @seanmalloy - Provide available prometheus metrics in documentation @vinny-sabatini +- Fix index out of range when hostname has no dots (#1756) @chemasan ## v0.7.3 - 2020-08-05 From 89160ffc3d6fdcf6059abb370df2afff131efdb9 Mon Sep 17 00:00:00 2001 From: Sean Malloy Date: Fri, 28 Aug 2020 01:43:09 -0500 Subject: [PATCH 16/46] Move Inactive Maintainers To Emeritus Approvers --- OWNERS | 6 ++++-- 1 file 
changed, 4 insertions(+), 2 deletions(-) diff --git a/OWNERS b/OWNERS index ab372d302..dbedf552f 100644 --- a/OWNERS +++ b/OWNERS @@ -2,7 +2,9 @@ # https://github.com/kubernetes/community/blob/HEAD/contributors/guide/owners.md approvers: - - hjacobs - raffo - - linki - njuettner + +emeritus_approvers: + - hjacobs + - linki From 7bf381fd33be556535bfcdea65b922cc9472843d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20Grumb=C3=B6ck?= Date: Fri, 28 Aug 2020 13:00:53 +0000 Subject: [PATCH 17/46] Fixes coverall, #1755 --- .github/workflows/ci.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 183db13b2..2af7c3314 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -35,10 +35,10 @@ jobs: curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.30.0 make lint - - name: Coverage - uses: shogo82148/actions-goveralls@v1 - - name: Test - run: make test - + run: go test -race -coverprofile=profile.cov ./... + - name: Send coverage + uses: shogo82148/actions-goveralls@v1 + with: + path-to-profile: profile.cov From 5eceb08ced5f5f2f88dd08c3b8cd4ec125066f08 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20Grumb=C3=B6ck?= Date: Fri, 28 Aug 2020 13:09:06 +0000 Subject: [PATCH 18/46] Updated CHANGELOG --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 33693d7ac..44bf8aad2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ - Fix: alibaba cloud keeping create record (#1682) @LXM - Update all container registry references to use k8s.gcr.io @seanmalloy - Provide available prometheus metrics in documentation @vinny-sabatini +- Fixes test coverage with coveralls (#1755) @jgrumboe ## v0.7.3 - 2020-08-05 From 4291995765c88dd69a9209be3210ebd9e8975168 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20Grumb=C3=B6ck?= Date: Fri, 28 Aug 2020 13:47:49 +0000 Subject: [PATCH 19/46] Updated FAQ for usage of annotation-filter --- docs/faq.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docs/faq.md b/docs/faq.md index 44ca362bd..1ae350065 100644 --- a/docs/faq.md +++ b/docs/faq.md @@ -272,6 +272,9 @@ an instance of a ingress controller. Let's assume you have two ingress controlle then you can start two ExternalDNS providers one with `--annotation-filter=kubernetes.io/ingress.class=nginx-internal` and one with `--annotation-filter=kubernetes.io/ingress.class=nginx-external`. +Beware when using multiple sources, e.g. `--source=service --source=ingress`, `--annotation-filter` will filter every given source objects. +If you need to filter only one specific source you have to run a separated external dns service containing only the wanted `--source` and `--annotation-filter`. + ### Can external-dns manage(add/remove) records in a hosted zone which is setup in different AWS account? Yes, give it the correct cross-account/assume-role permissions and use the `--aws-assume-role` flag https://github.com/kubernetes-sigs/external-dns/pull/524#issue-181256561 From bfda251c71f8c42048c43580ad5a632407a5b689 Mon Sep 17 00:00:00 2001 From: David Dooling Date: Mon, 31 Aug 2020 16:26:33 -0500 Subject: [PATCH 20/46] Add tutorial for GKE with workload identity Add instructions for using GKE workload identity to allow ExternalDNS to authenticate against Google Cloud DNS APIs. Add blog link to README. 
Signed-off-by: David Dooling --- CHANGELOG.md | 1 + README.md | 2 + docs/tutorials/gke.md | 361 +++++++++++++++++++++++++++----- docs/tutorials/nginx-ingress.md | 332 ++++++++++++++++++++++++++++- 4 files changed, 628 insertions(+), 68 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 124393767..81d2bd9fa 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,6 +10,7 @@ - Update all container registry references to use k8s.gcr.io @seanmalloy - Provide available prometheus metrics in documentation @vinny-sabatini - Fix index out of range when hostname has no dots (#1756) @chemasan +- Add tutorial for GKE with workload identity (#1765) @ddgenome ## v0.7.3 - 2020-08-05 diff --git a/README.md b/README.md index 40b272184..46c0e5311 100644 --- a/README.md +++ b/README.md @@ -332,7 +332,9 @@ ExternalDNS is an effort to unify the following similar projects in order to bri * Molecule Software's [route53-kubernetes](https://github.com/wearemolecule/route53-kubernetes) ### User Demo How-To Blogs and Examples + * A full demo on GKE Kubernetes. See [How-to Kubernetes with DNS management (ssl-manager pre-req)](https://medium.com/@jpantjsoha/how-to-kubernetes-with-dns-management-for-gitops-31239ea75d8d) +* Run external-dns on GKE with workload identity. See [Kubernetes, ingress-nginx, cert-manager & external-dns](https://blog.atomist.com/kubernetes-ingress-nginx-cert-manager-external-dns/) ### Code of conduct diff --git a/docs/tutorials/gke.md b/docs/tutorials/gke.md index 57d1d983a..7cba0c341 100644 --- a/docs/tutorials/gke.md +++ b/docs/tutorials/gke.md @@ -4,8 +4,6 @@ This tutorial describes how to setup ExternalDNS for usage within a GKE cluster. ## Set up your environment -*If you prefer to try-out ExternalDNS in one of the existing environments you can skip this step* - Setup your environment to work with Google Cloud Platform. Fill in your values as needed, e.g. target project. ```console @@ -14,6 +12,16 @@ $ gcloud config set compute/region "europe-west1" $ gcloud config set compute/zone "europe-west1-d" ``` +## GKE Node Scopes + +*If you prefer to try-out ExternalDNS in one of the existing environments you can skip this step* + +The following instructions use instance scopes to provide ExternalDNS with the +permissions it needs to manage DNS records. Note that since these permissions +are associated with the instance, all pods in the cluster will also have these +permissions. As such, this approach is not suitable for anything but testing +environments. + Create a GKE cluster. ```console @@ -52,59 +60,10 @@ $ gcloud dns record-sets transaction add ns-cloud-e{1..4}.googledomains.com. \ $ gcloud dns record-sets transaction execute --zone "gcp-zalan-do" ``` -## Deploy ExternalDNS +### Deploy ExternalDNS -### Role-Based Access Control (RBAC) +Then apply the following manifests file to deploy ExternalDNS. -[RBAC]("https://cloud.google.com/kubernetes-engine/docs/how-to/role-based-access-control") is enabled by default on all Container clusters which are running Kubernetes version 1.6 or higher. - -Because of the way Container Engine checks permissions when you create a Role or ClusterRole, you must first create a RoleBinding that grants you all of the permissions included in the role you want to create. - -```console -kubectl create clusterrolebinding your-user-cluster-admin-binding --clusterrole=cluster-admin --user=your.google.cloud.email@example.org -``` - -Connect your `kubectl` client to the cluster you just created. 
- -```console -gcloud container clusters get-credentials "external-dns" -``` - -Then apply one of the following manifests file to deploy ExternalDNS. - -### Manifest (for clusters without RBAC enabled) -```yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: external-dns -spec: - strategy: - type: Recreate - selector: - matchLabels: - app: external-dns - template: - metadata: - labels: - app: external-dns - spec: - containers: - - name: external-dns - image: k8s.gcr.io/external-dns/external-dns:v0.7.3 - args: - - --source=service - - --source=ingress - - --domain-filter=external-dns-test.gcp.zalan.do # will make ExternalDNS see only the hosted zones matching provided domain, omit to process all available hosted zones - - --provider=google -# - --google-project=zalando-external-dns-test # Use this to specify a project different from the one external-dns is running inside - - --policy=upsert-only # would prevent ExternalDNS from deleting any records, omit to enable full synchronization - - --registry=txt - - --txt-prefix=extdns # when using `registry=txt` option, make sure to also use the `txt-prefix` and `txt-owner-id` options as well. If you try to create a `TXT` record without a prefix, it will try to create a `TXT` record with the same name as your actual DNS record and fail (creating a stranded record `external-dns` cannot manage). - - --txt-owner-id=my-identifier -``` - -### Manifest (for clusters with RBAC enabled) ```yaml apiVersion: v1 kind: ServiceAccount @@ -171,8 +130,7 @@ spec: Use `--dry-run` if you want to be extra careful on the first run. Note, that you will not see any records created when you are running in dry-run mode. You can, however, inspect the logs and watch what would have been done. - -## Verify ExternalDNS works +### Verify ExternalDNS works Create the following sample application to test that ExternalDNS works. @@ -302,7 +260,7 @@ $ curl via-ingress.external-dns-test.gcp.zalan.do ``` -## Clean up +### Clean up Make sure to delete all Service and Ingress objects before terminating the cluster so all load balancers get cleaned up correctly. @@ -327,5 +285,294 @@ $ gcloud dns record-sets transaction remove ns-cloud-e{1..4}.googledomains.com. $ gcloud dns record-sets transaction execute --zone "gcp-zalan-do" ``` -### User Demo How-To Blogs and Examples +## GKE with Workload Identity + +The following instructions use [GKE workload +identity](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity) +to provide ExternalDNS with the permissions it needs to manage DNS records. +Workload identity is the Google-recommended way to provide GKE workloads access +to GCP APIs. + +Create a GKE cluster with workload identity enabled. + +```console +$ gcloud container clusters create external-dns \ + --workload-metadata-from-node=GKE_METADATA_SERVER \ + --identity-namespace=zalando-external-dns-test.svc.id.goog +``` + +Create a GCP service account (GSA) for ExternalDNS and save its email address. + +```console +$ sa_name="Kubernetes external-dns" +$ gcloud iam service-accounts create sa-edns --display-name="$sa_name" +$ sa_email=$(gcloud iam service-accounts list --format='value(email)' \ + --filter="displayName:$sa_name") +``` + +Bind the ExternalDNS GSA to the DNS admin role. 
+ +```console +$ gcloud projects add-iam-policy-binding zalando-external-dns-test \ + --member="serviceAccount:$sa_email" --role=roles/dns.admin +``` + +Link the ExternalDNS GSA to the Kubernetes service account (KSA) that +external-dns will run under, i.e., the external-dns KSA in the external-dns +namespaces. + +```console +$ gcloud iam service-accounts add-iam-policy-binding "$sa_email" \ + --member="serviceAccount:zalando-external-dns-test.svc.id.goog[external-dns/external-dns]" \ + --role=roles/iam.workloadIdentityUser +``` + +Create a DNS zone which will contain the managed DNS records. + +```console +$ gcloud dns managed-zones create external-dns-test-gcp-zalan-do \ + --dns-name=external-dns-test.gcp.zalan.do. \ + --description="Automatically managed zone by ExternalDNS" +``` + +Make a note of the nameservers that were assigned to your new zone. + +```console +$ gcloud dns record-sets list \ + --zone=external-dns-test-gcp-zalan-do \ + --name=external-dns-test.gcp.zalan.do. \ + --type NS +NAME TYPE TTL DATA +external-dns-test.gcp.zalan.do. NS 21600 ns-cloud-e1.googledomains.com.,ns-cloud-e2.googledomains.com.,ns-cloud-e3.googledomains.com.,ns-cloud-e4.googledomains.com. +``` + +In this case it's `ns-cloud-{e1-e4}.googledomains.com.` but your's could +slightly differ, e.g. `{a1-a4}`, `{b1-b4}` etc. + +Tell the parent zone where to find the DNS records for this zone by adding the +corresponding NS records there. Assuming the parent zone is "gcp-zalan-do" and +the domain is "gcp.zalan.do" and that it's also hosted at Google we would do the +following. + +```console +$ gcloud dns record-sets transaction start --zone=gcp-zalan-do +$ gcloud dns record-sets transaction add ns-cloud-e{1..4}.googledomains.com. \ + --name=external-dns-test.gcp.zalan.do. --ttl 300 --type NS --zone=gcp-zalan-do +$ gcloud dns record-sets transaction execute --zone=gcp-zalan-do +``` + +Connect your `kubectl` client to the cluster you just created and bind your GCP +user to the cluster admin role in Kubernetes. + +```console +$ gcloud container clusters get-credentials external-dns +$ kubectl create clusterrolebinding cluster-admin-me \ + --clusterrole=cluster-admin --user="$(gcloud config get-value account)" +``` + +### Deploy ExternalDNS + +Apply the following manifest file to deploy external-dns. 
+ +```yaml +apiVersion: v1 +kind: Namespace +metadata: + name: external-dns +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: external-dns + namespace: external-dns +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: external-dns +rules: + - apiGroups: [""] + resources: ["services", "endpoints", "pods"] + verbs: ["get", "watch", "list"] + - apiGroups: ["extensions", "networking.k8s.io"] + resources: ["ingresses"] + verbs: ["get", "watch", "list"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["list"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: external-dns-viewer +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: external-dns +subjects: + - kind: ServiceAccount + name: external-dns + namespace: external-dns +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: external-dns + namespace: external-dns +spec: + strategy: + type: Recreate + selector: + matchLabels: + app: external-dns + template: + metadata: + labels: + app: external-dns + spec: + containers: + - args: + - --source=ingress + - --source=service + - --domain-filter=external-dns-test.gcp.zalan.do + - --provider=google + - --google-project=zalando-external-dns-test + - --registry=txt + - --txt-owner-id=my-identifier + image: k8s.gcr.io/external-dns/external-dns:v0.7.3 + name: external-dns + securityContext: + fsGroup: 65534 + runAsUser: 65534 + serviceAccountName: external-dns +``` + +Then add the proper workload identity annotation to the cert-manager service +account. + +```bash +$ kubectl annotate serviceaccount --namespace=external-dns external-dns \ + "iam.gke.io/gcp-service-account=$sa_email" +``` + +### Deploy a sample application + +Create the following sample application to test that ExternalDNS works. + +```yaml +apiVersion: networking.k8s.io/v1beta1 +kind: Ingress +metadata: + name: nginx +spec: + rules: + - host: via-ingress.external-dns-test.gcp.zalan.do + http: + paths: + - backend: + serviceName: nginx + servicePort: 80 +--- +apiVersion: v1 +kind: Service +metadata: + annotations: + external-dns.alpha.kubernetes.io/hostname: nginx.external-dns-test.gcp.zalan.do. + name: nginx +spec: + ports: + - port: 80 + targetPort: 80 + selector: + app: nginx + type: LoadBalancer +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx +spec: + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - image: nginx + name: nginx + ports: + - containerPort: 80 +``` + +After roughly two minutes check that a corresponding DNS records for your +service and ingress were created. + +```console +$ gcloud dns record-sets list \ + --zone "external-dns-test-gcp-zalan-do" \ + --name "via-ingress.external-dns-test.gcp.zalan.do." \ + --type A +NAME TYPE TTL DATA +nginx.external-dns-test.gcp.zalan.do. A 300 104.155.60.49 +nginx.external-dns-test.gcp.zalan.do. TXT 300 "heritage=external-dns,external-dns/owner=my-identifier" +via-ingress.external-dns-test.gcp.zalan.do. TXT 300 "heritage=external-dns,external-dns/owner=my-identifier" +via-ingress.external-dns-test.gcp.zalan.do. A 300 35.187.1.246 +``` + +Let's check that we can resolve this DNS name as well. + +```console +$ dig +short @ns-cloud-e1.googledomains.com. via-ingress.external-dns-test.gcp.zalan.do. +35.187.1.246 +``` + +Try with `curl` as well. + +```console +$ curl via-ingress.external-dns-test.gcp.zalan.do + + + +Welcome to nginx! +... + + +... 
+ + +``` + +### Clean up + +Make sure to delete all service and ingress objects before terminating the +cluster so all load balancers and DNS entries get cleaned up correctly. + +```console +$ kubectl delete ingress nginx +$ kubectl delete service nginx +``` + +Give ExternalDNS some time to clean up the DNS records for you. Then delete the +managed zone and cluster. + +```console +$ gcloud dns managed-zones delete external-dns-test-gcp-zalan-do +$ gcloud container clusters delete external-dns +``` + +Also delete the NS records for your removed zone from the parent zone. + +```console +$ gcloud dns record-sets transaction start --zone gcp-zalan-do +$ gcloud dns record-sets transaction remove ns-cloud-e{1..4}.googledomains.com. \ + --name=external-dns-test.gcp.zalan.do. --ttl 300 --type NS --zone=gcp-zalan-do +$ gcloud dns record-sets transaction execute --zone=gcp-zalan-do +``` + +## User Demo How-To Blogs and Examples + * A full demo on GKE Kubernetes + CloudDNS + SA-Permissions [How-to Kubernetes with DNS management (ssl-manager pre-req)](https://medium.com/@jpantjsoha/how-to-kubernetes-with-dns-management-for-gitops-31239ea75d8d) +* Run external-dns on GKE with workload identity. See [Kubernetes, ingress-nginx, cert-manager & external-dns](https://blog.atomist.com/kubernetes-ingress-nginx-cert-manager-external-dns/) diff --git a/docs/tutorials/nginx-ingress.md b/docs/tutorials/nginx-ingress.md index 1fa7f25a7..b2898935f 100644 --- a/docs/tutorials/nginx-ingress.md +++ b/docs/tutorials/nginx-ingress.md @@ -2,6 +2,8 @@ This tutorial describes how to setup ExternalDNS for usage within a GKE cluster that doesn't make use of Google's [default ingress controller](https://github.com/kubernetes/ingress-gce) but rather uses [nginx-ingress-controller](https://github.com/kubernetes/ingress-nginx) for that task. +## Set up your environment + Setup your environment to work with Google Cloud Platform. Fill in your values as needed, e.g. target project. ```console @@ -10,6 +12,14 @@ $ gcloud config set compute/region "europe-west1" $ gcloud config set compute/zone "europe-west1-d" ``` +## GKE Node Scopes + +The following instructions use instance scopes to provide ExternalDNS with the +permissions it needs to manage DNS records. Note that since these permissions +are associated with the instance, all pods in the cluster will also have these +permissions. As such, this approach is not suitable for anything but testing +environments. + Create a GKE cluster without using the default ingress controller. ```console @@ -48,19 +58,20 @@ $ gcloud dns record-sets transaction add ns-cloud-e{1..4}.googledomains.com. \ $ gcloud dns record-sets transaction execute --zone "gcp-zalan-do" ``` -If you decide not to create a new zone but reuse an existing one, make sure it's currently **unused** and **empty**. This version of ExternalDNS will remove all records it doesn't recognize from the zone. - -Connect your `kubectl` client to the cluster you just created. +Connect your `kubectl` client to the cluster you just created and bind your GCP +user to the cluster admin role in Kubernetes. ```console -gcloud container clusters get-credentials "external-dns" +$ gcloud container clusters get-credentials "external-dns" +$ kubectl create clusterrolebinding cluster-admin-me \ + --clusterrole=cluster-admin --user="$(gcloud config get-value account)" ``` -## Deploy the nginx ingress controller +### Deploy the nginx ingress controller First, you need to deploy the nginx-based ingress controller. 
It can be deployed in at least two modes: Leveraging a Layer 4 load balancer in front of the nginx proxies or directly targeting pods with hostPorts on your worker nodes. ExternalDNS doesn't really care and supports both modes. -### Default Backend +#### Default Backend The nginx controller uses a default backend that it serves when no Ingress rule matches. This is a separate Service that can be picked by you. We'll use the default backend that's used by other ingress controllers for that matter. Apply the following manifests to your cluster to deploy the default backend. @@ -96,7 +107,7 @@ spec: image: gcr.io/google_containers/defaultbackend:1.3 ``` -### Without a separate TCP load balancer +#### Without a separate TCP load balancer By default, the controller will update your Ingress objects with the public IPs of the nodes running your nginx controller instances. You should run multiple instances in case of pod or node failure. The controller will do leader election and will put multiple IPs as targets in your Ingress objects in that case. It could also make sense to run it as a DaemonSet. However, we'll just run a single replica. You have to open the respective ports on all of your worker nodes to allow nginx to receive traffic. @@ -145,7 +156,7 @@ spec: hostPort: 443 ``` -### With a separate TCP load balancer +#### With a separate TCP load balancer However, you can also have the ingress controller proxied by a Kubernetes Service. This will instruct the controller to populate this Service's external IP as the external IP of the Ingress. This exposes the nginx proxies via a Layer 4 load balancer (`type=LoadBalancer`) which is more reliable than the other method. With that approach, you can run as many nginx proxy instances on your cluster as you like or have them autoscaled. This is the preferred way of running the nginx controller. @@ -206,7 +217,7 @@ spec: - containerPort: 443 ``` -## Deploy ExternalDNS +### Deploy ExternalDNS Apply the following manifest file to deploy ExternalDNS. @@ -274,7 +285,7 @@ spec: Use `--dry-run` if you want to be extra careful on the first run. Note, that you will not see any records created when you are running in dry-run mode. You can, however, inspect the logs and watch what would have been done. -## Deploy a sample application +### Deploy a sample application Create the following sample application to test that ExternalDNS works. @@ -363,7 +374,7 @@ $ curl via-ingress.external-dns-test.gcp.zalan.do ``` -## Clean up +### Clean up Make sure to delete all Service and Ingress objects before terminating the cluster so all load balancers and DNS entries get cleaned up correctly. @@ -387,3 +398,302 @@ $ gcloud dns record-sets transaction remove ns-cloud-e{1..4}.googledomains.com. --name "external-dns-test.gcp.zalan.do." --ttl 300 --type NS --zone "gcp-zalan-do" $ gcloud dns record-sets transaction execute --zone "gcp-zalan-do" ``` + +## GKE with Workload Identity + +The following instructions use [GKE workload +identity](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity) +to provide ExternalDNS with the permissions it needs to manage DNS records. +Workload identity is the Google-recommended way to provide GKE workloads access +to GCP APIs. + +Create a GKE cluster with workload identity enabled and without the +HttpLoadBalancing add-on. 
+ +```console +$ gcloud container clusters create external-dns \ + --workload-metadata-from-node=GKE_METADATA_SERVER \ + --identity-namespace=zalando-external-dns-test.svc.id.goog \ + --addons=HorizontalPodAutoscaling +``` + +Create a GCP service account (GSA) for ExternalDNS and save its email address. + +```console +$ sa_name="Kubernetes external-dns" +$ gcloud iam service-accounts create sa-edns --display-name="$sa_name" +$ sa_email=$(gcloud iam service-accounts list --format='value(email)' \ + --filter="displayName:$sa_name") +``` + +Bind the ExternalDNS GSA to the DNS admin role. + +```console +$ gcloud projects add-iam-policy-binding zalando-external-dns-test \ + --member="serviceAccount:$sa_email" --role=roles/dns.admin +``` + +Link the ExternalDNS GSA to the Kubernetes service account (KSA) that +external-dns will run under, i.e., the external-dns KSA in the external-dns +namespaces. + +```console +$ gcloud iam service-accounts add-iam-policy-binding "$sa_email" \ + --member="serviceAccount:zalando-external-dns-test.svc.id.goog[external-dns/external-dns]" \ + --role=roles/iam.workloadIdentityUser +``` + +Create a DNS zone which will contain the managed DNS records. + +```console +$ gcloud dns managed-zones create external-dns-test-gcp-zalan-do \ + --dns-name=external-dns-test.gcp.zalan.do. \ + --description="Automatically managed zone by ExternalDNS" +``` + +Make a note of the nameservers that were assigned to your new zone. + +```console +$ gcloud dns record-sets list \ + --zone=external-dns-test-gcp-zalan-do \ + --name=external-dns-test.gcp.zalan.do. \ + --type NS +NAME TYPE TTL DATA +external-dns-test.gcp.zalan.do. NS 21600 ns-cloud-e1.googledomains.com.,ns-cloud-e2.googledomains.com.,ns-cloud-e3.googledomains.com.,ns-cloud-e4.googledomains.com. +``` + +In this case it's `ns-cloud-{e1-e4}.googledomains.com.` but your's could +slightly differ, e.g. `{a1-a4}`, `{b1-b4}` etc. + +Tell the parent zone where to find the DNS records for this zone by adding the +corresponding NS records there. Assuming the parent zone is "gcp-zalan-do" and +the domain is "gcp.zalan.do" and that it's also hosted at Google we would do the +following. + +```console +$ gcloud dns record-sets transaction start --zone=gcp-zalan-do +$ gcloud dns record-sets transaction add ns-cloud-e{1..4}.googledomains.com. \ + --name=external-dns-test.gcp.zalan.do. --ttl 300 --type NS --zone=gcp-zalan-do +$ gcloud dns record-sets transaction execute --zone=gcp-zalan-do +``` + +Connect your `kubectl` client to the cluster you just created and bind your GCP +user to the cluster admin role in Kubernetes. + +```console +$ gcloud container clusters get-credentials external-dns +$ kubectl create clusterrolebinding cluster-admin-me \ + --clusterrole=cluster-admin --user="$(gcloud config get-value account)" +``` + +### Deploy ingress-nginx + +Follow the [ingress-nginx GKE installation +instructions](https://kubernetes.github.io/ingress-nginx/deploy/#gce-gke) to +deploy it to the cluster. + +```console +$ kubectl apply -f \ + https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v0.35.0/deploy/static/provider/cloud/deploy.yaml +``` + +### Deploy ExternalDNS + +Apply the following manifest file to deploy external-dns. 
+ +```yaml +apiVersion: v1 +kind: Namespace +metadata: + name: external-dns +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: external-dns + namespace: external-dns +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: external-dns +rules: + - apiGroups: [""] + resources: ["services", "endpoints", "pods"] + verbs: ["get", "watch", "list"] + - apiGroups: ["extensions", "networking.k8s.io"] + resources: ["ingresses"] + verbs: ["get", "watch", "list"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["list"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: external-dns-viewer +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: external-dns +subjects: + - kind: ServiceAccount + name: external-dns + namespace: external-dns +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: external-dns + namespace: external-dns +spec: + strategy: + type: Recreate + selector: + matchLabels: + app: external-dns + template: + metadata: + labels: + app: external-dns + spec: + containers: + - args: + - --source=ingress + - --domain-filter=external-dns-test.gcp.zalan.do + - --provider=google + - --google-project=zalando-external-dns-test + - --registry=txt + - --txt-owner-id=my-identifier + image: k8s.gcr.io/external-dns/external-dns:v0.7.3 + name: external-dns + securityContext: + fsGroup: 65534 + runAsUser: 65534 + serviceAccountName: external-dns +``` + +Then add the proper workload identity annotation to the cert-manager service +account. + +```bash +$ kubectl annotate serviceaccount --namespace=external-dns external-dns \ + "iam.gke.io/gcp-service-account=$sa_email" +``` + +### Deploy a sample application + +Create the following sample application to test that ExternalDNS works. + +```yaml +apiVersion: networking.k8s.io/v1beta1 +kind: Ingress +metadata: + name: nginx + annotations: + kubernetes.io/ingress.class: nginx +spec: + rules: + - host: via-ingress.external-dns-test.gcp.zalan.do + http: + paths: + - backend: + serviceName: nginx + servicePort: 80 +--- +apiVersion: v1 +kind: Service +metadata: + name: nginx +spec: + ports: + - port: 80 + targetPort: 80 + selector: + app: nginx +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx +spec: + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - image: nginx + name: nginx + ports: + - containerPort: 80 +``` + +After roughly two minutes check that a corresponding DNS record for your ingress +was created. + +```console +$ gcloud dns record-sets list \ + --zone "external-dns-test-gcp-zalan-do" \ + --name "via-ingress.external-dns-test.gcp.zalan.do." \ + --type A +NAME TYPE TTL DATA +via-ingress.external-dns-test.gcp.zalan.do. A 300 35.187.1.246 +``` + +Let's check that we can resolve this DNS name as well. + +```console +$ dig +short @ns-cloud-e1.googledomains.com. via-ingress.external-dns-test.gcp.zalan.do. +35.187.1.246 +``` + +Try with `curl` as well. + +```console +$ curl via-ingress.external-dns-test.gcp.zalan.do + + + +Welcome to nginx! +... + + +... + + +``` + +### Clean up + +Make sure to delete all service and ingress objects before terminating the +cluster so all load balancers and DNS entries get cleaned up correctly. + +```console +$ kubectl delete service --namespace=ingress-nginx ingress-nginx-controller +$ kubectl delete ingress nginx +``` + +Give ExternalDNS some time to clean up the DNS records for you. Then delete the +managed zone and cluster. 
+ +```console +$ gcloud dns managed-zones delete external-dns-test-gcp-zalan-do +$ gcloud container clusters delete external-dns +``` + +Also delete the NS records for your removed zone from the parent zone. + +```console +$ gcloud dns record-sets transaction start --zone gcp-zalan-do +$ gcloud dns record-sets transaction remove ns-cloud-e{1..4}.googledomains.com. \ + --name=external-dns-test.gcp.zalan.do. --ttl 300 --type NS --zone=gcp-zalan-do +$ gcloud dns record-sets transaction execute --zone=gcp-zalan-do +``` + +## User Demo How-To Blogs and Examples + +* Run external-dns on GKE with workload identity. See [Kubernetes, ingress-nginx, cert-manager & external-dns](https://blog.atomist.com/kubernetes-ingress-nginx-cert-manager-external-dns/) From 541c22e72c73cfb82223f998491d17845d95c814 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20Grumb=C3=B6ck?= Date: Fri, 28 Aug 2020 13:00:53 +0000 Subject: [PATCH 21/46] Fixes coverall, #1755 --- .github/workflows/ci.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 183db13b2..2af7c3314 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -35,10 +35,10 @@ jobs: curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.30.0 make lint - - name: Coverage - uses: shogo82148/actions-goveralls@v1 - - name: Test - run: make test - + run: go test -race -coverprofile=profile.cov ./... + - name: Send coverage + uses: shogo82148/actions-goveralls@v1 + with: + path-to-profile: profile.cov From 26f2badfc99b0ca73e680343fd286ecba18c3bf9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20Grumb=C3=B6ck?= Date: Fri, 28 Aug 2020 13:09:06 +0000 Subject: [PATCH 22/46] Updated CHANGELOG because of rebasing --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 124393767..663b52b16 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,6 +10,7 @@ - Update all container registry references to use k8s.gcr.io @seanmalloy - Provide available prometheus metrics in documentation @vinny-sabatini - Fix index out of range when hostname has no dots (#1756) @chemasan +- Fixes test coverage with coveralls (#1755) @jgrumboe ## v0.7.3 - 2020-08-05 From 6f5ef2a56d46ad9b87300de4843df424f41ebbf1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20Grumb=C3=B6ck?= Date: Tue, 1 Sep 2020 06:30:29 +0000 Subject: [PATCH 23/46] Moved coverprofile creation into Makefile --- .github/workflows/ci.yml | 2 +- Makefile | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 2af7c3314..6bb602edc 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -36,7 +36,7 @@ jobs: make lint - name: Test - run: go test -race -coverprofile=profile.cov ./... + run: make test - name: Send coverage uses: shogo82148/actions-goveralls@v1 diff --git a/Makefile b/Makefile index 06f6bea34..8f09d6c85 100644 --- a/Makefile +++ b/Makefile @@ -56,7 +56,7 @@ lint: licensecheck go-lint .PHONY: verify test test: - go test -race ./... + go test -race -coverprofile=profile.cov ./... 
# The build targets allow to build the binary and docker image .PHONY: build build.docker build.mini From d42251eaad129d1fd893d20466c0642afb72a275 Mon Sep 17 00:00:00 2001 From: Sean Malloy Date: Sun, 30 Aug 2020 23:34:48 -0500 Subject: [PATCH 24/46] Add quick start section to contributor docs --- README.md | 27 ++--------------------- docs/contributing/getting-started.md | 33 +++++++++++++++++++++++----- 2 files changed, 30 insertions(+), 30 deletions(-) diff --git a/README.md b/README.md index 46c0e5311..e5e93ee1e 100644 --- a/README.md +++ b/README.md @@ -155,34 +155,11 @@ The following tutorials are provided: ### Running Locally -#### Technical Requirements - -Make sure you have the following prerequisites: -* A local Go 1.11+ development environment. -* Access to a Google/AWS account with the DNS API enabled. -* Access to a Kubernetes cluster that supports exposing Services, e.g. GKE. +See the [contributor guide](docs/contributing/getting-started.md) for details on compiling +from source. #### Setup Steps -First, get ExternalDNS: - -```console -$ git clone https://github.com/kubernetes-sigs/external-dns.git && cd external-dns -``` - -**This project uses [Go modules](https://github.com/golang/go/wiki/Modules) as -introduced in Go 1.11 therefore you need Go >=1.11 installed in order to build.** -If using Go 1.11 you also need to [activate Module -support](https://github.com/golang/go/wiki/Modules#installing-and-activating-module-support). - -Assuming Go has been setup with module support it can be built simply by running: - -```console -$ make -``` - -This will create external-dns in the build directory directly from the default branch. - Next, run an application and expose it via a Kubernetes Service: ```console diff --git a/docs/contributing/getting-started.md b/docs/contributing/getting-started.md index 3ca84b425..f155540f3 100644 --- a/docs/contributing/getting-started.md +++ b/docs/contributing/getting-started.md @@ -1,10 +1,33 @@ -# Project structure +# Quick Start -### Building +- [Git](https://git-scm.com/downloads) +- [Go 1.14+](https://golang.org/dl/) +- [Go modules](https://github.com/golang/go/wiki/Modules) +- [golangci-lint](https://github.com/golangci/golangci-lint) +- [Docker](https://docs.docker.com/install/) +- [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl) -You can build ExternalDNS for your platform with `make build`, you may have to install the necessary dependencies with `make dep`. The binary will land at `build/external-dns`. +Compile and run locally against a remote k8s cluster. +``` +git clone https://github.com/kubernetes-sigs/external-dns.git && cd external-dns +make build +# login to remote k8s cluster +./build/external-dns --source=service --provider=inmemory --once +``` -### Design +Run linting, unit tests, and coverage report. +``` +make lint +make test +make cover-html +``` + +Build container image. +``` +make build.docker +``` + +# Design ExternalDNS's sources of DNS records live in package [source](../../source). They implement the `Source` interface that has a single method `Endpoints` which returns the represented source's objects converted to `Endpoints`. Endpoints are just a tuple of DNS name and target where target can be an IP or another hostname. @@ -20,7 +43,7 @@ The orchestration between the different components is controlled by the [control You can pick which `Source` and `Provider` to use at runtime via the `--source` and `--provider` flags, respectively. 
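
To make the `Provider` side of this split more concrete, here is a rough sketch of a minimal, do-nothing provider. It is an editorial illustration, not part of the patch series: the package, file path, and type name (`example`, `exampleProvider`) are invented, and it assumes the two-method provider shape (`Records`/`ApplyChanges` over `endpoint.Endpoint` and `plan.Changes`) that the providers touched elsewhere in this series follow.

```go
// Hypothetical file layout: provider/example/example.go (names invented for
// illustration only).
package example

import (
	"context"

	log "github.com/sirupsen/logrus"

	"sigs.k8s.io/external-dns/endpoint"
	"sigs.k8s.io/external-dns/plan"
)

// exampleProvider serves a fixed set of records and only logs the changes it
// is asked to apply. A real provider would call its DNS service's API in
// these two methods instead.
type exampleProvider struct {
	records []*endpoint.Endpoint
}

// Records returns the records the provider currently knows about.
func (p *exampleProvider) Records(ctx context.Context) ([]*endpoint.Endpoint, error) {
	return p.records, nil
}

// ApplyChanges receives the record changes computed by the planning step and
// would normally translate them into API calls against the DNS backend.
func (p *exampleProvider) ApplyChanges(ctx context.Context, changes *plan.Changes) error {
	for _, ep := range changes.Create {
		log.Infof("CREATE: %s %s -> %v", ep.RecordType, ep.DNSName, ep.Targets)
	}
	for _, ep := range changes.UpdateNew {
		log.Infof("UPDATE: %s %s -> %v", ep.RecordType, ep.DNSName, ep.Targets)
	}
	for _, ep := range changes.Delete {
		log.Infof("DELETE: %s %s", ep.RecordType, ep.DNSName)
	}
	return nil
}
```

Wiring such a provider in is then a matter of adding a case for it to the provider switch in `main.go`, as the next section describes.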
-### Adding a DNS provider +# Adding a DNS Provider A typical way to start on, e.g. a CoreDNS provider, would be to add a `coredns.go` to the providers package and implement the interface methods. Then you would have to register your provider under a name in `main.go`, e.g. `coredns`, and would be able to trigger it's functions via setting `--provider=coredns`. From f52c47a25967a98ca244827e0c4159d8dfc61344 Mon Sep 17 00:00:00 2001 From: Sean Malloy Date: Tue, 1 Sep 2020 08:43:47 -0500 Subject: [PATCH 25/46] Bump CHANGELOG --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 930a2d472..31971ed68 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,6 @@ ## Unreleased +- Add quick start section to contributing docs (#1766) @seanmalloy - Enhance pull request template @seanmalloy - Improve errors context for AWS provider - Scaleway Provider (#1643) @Sh4d1 From 6a2f40a6d1ebae17b775da6c8759ade4520dd65f Mon Sep 17 00:00:00 2001 From: Raffaele Di Fazio Date: Tue, 1 Sep 2020 21:05:28 +0200 Subject: [PATCH 26/46] remove checklist from PR template --- .github/pull_request_template.md | 8 -------- 1 file changed, 8 deletions(-) diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 5e87915b8..f71127118 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -7,11 +7,3 @@ - -Fixes #ISSUE - -**Checklist** - -- [ ] Unit tests updated -- [ ] End user documentation updated -- [ ] CHANGELOG.md updated, use section "Unreleased" From 8e28d46aebc96a9792f1100de11e1e5079fe85f2 Mon Sep 17 00:00:00 2001 From: Kostas Kapetanakis Date: Tue, 1 Sep 2020 23:49:49 +0300 Subject: [PATCH 27/46] fix ingress-controller yaml link --- docs/tutorials/kube-ingress-aws.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/tutorials/kube-ingress-aws.md b/docs/tutorials/kube-ingress-aws.md index 565e1185d..266cf4159 100644 --- a/docs/tutorials/kube-ingress-aws.md +++ b/docs/tutorials/kube-ingress-aws.md @@ -76,7 +76,7 @@ rules: ``` See also current RBAC yaml files: -- [kube-ingress-aws-controller](https://github.com/zalando-incubator/kubernetes-on-aws/blob/dev/cluster/manifests/ingress-controller/rbac.yaml) +- [kube-ingress-aws-controller](https://github.com/zalando-incubator/kubernetes-on-aws/blob/dev/cluster/manifests/ingress-controller/01-rbac.yaml) - [skipper](https://github.com/zalando-incubator/kubernetes-on-aws/blob/dev/cluster/manifests/skipper/rbac.yaml) - [external-dns](https://github.com/zalando-incubator/kubernetes-on-aws/blob/dev/cluster/manifests/external-dns/rbac.yaml) From fdbd00f9c099fa2aa3a7904d2c3a04b88e2e109c Mon Sep 17 00:00:00 2001 From: Sean Malloy Date: Tue, 1 Sep 2020 22:49:53 -0500 Subject: [PATCH 28/46] Update pull requests template * Add section for linked issues * Add checklist * Do NOT include CHANGELOG updates as part of the checklist --- .github/pull_request_template.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index f71127118..3c8ac29ff 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -7,3 +7,10 @@ + +Fixes #ISSUE + +**Checklist** + +- [ ] Unit tests updated +- [ ] End user documentation updated From 53c0cf951ddef932f37311154eddba53998d5cdc Mon Sep 17 00:00:00 2001 From: codearky <45842933+codearky@users.noreply.github.com> Date: Wed, 2 Sep 2020 21:33:06 +0300 Subject: [PATCH 29/46] =?UTF-8?q?Remove=20duplication=20of=20external=20ip?= 
=?UTF-8?q?s=20when=20ExternalTrafficPolicy=20set=20to=20=E2=80=A6=20(#174?= =?UTF-8?q?4)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Remove duplication of target ips for NodePort services with ExternalTrafficPolicy=Local * Removed trailing lines --- CHANGELOG.md | 1 + source/service.go | 6 +++++- source/service_test.go | 46 ++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 52 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 31971ed68..fba5cd904 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,6 +13,7 @@ - Fix index out of range when hostname has no dots (#1756) @chemasan - Fixes test coverage with coveralls (#1755) @jgrumboe - Add tutorial for GKE with workload identity (#1765) @ddgenome +- Fix NodePort with externaltrafficpolicy targets duplication @codearky ## v0.7.3 - 2020-08-05 diff --git a/source/service.go b/source/service.go index 2b9389f50..2c8fb0997 100644 --- a/source/service.go +++ b/source/service.go @@ -511,6 +511,7 @@ func (sc *serviceSource) extractNodePortTargets(svc *v1.Service) (endpoint.Targe switch svc.Spec.ExternalTrafficPolicy { case v1.ServiceExternalTrafficPolicyTypeLocal: + nodesMap := map[*v1.Node]struct{}{} labelSelector, err := metav1.ParseToLabelSelector(labels.Set(svc.Spec.Selector).AsSelectorPreValidated().String()) if err != nil { return nil, err @@ -531,7 +532,10 @@ func (sc *serviceSource) extractNodePortTargets(svc *v1.Service) (endpoint.Targe log.Debugf("Unable to find node where Pod %s is running", v.Spec.Hostname) continue } - nodes = append(nodes, node) + if _, ok := nodesMap[node]; !ok { + nodesMap[node] = *new(struct{}) + nodes = append(nodes, node) + } } } default: diff --git a/source/service_test.go b/source/service_test.go index 8a8e428ad..081ef2247 100644 --- a/source/service_test.go +++ b/source/service_test.go @@ -1545,6 +1545,52 @@ func TestNodePortServices(t *testing.T) { []int{1}, []v1.PodPhase{v1.PodRunning}, }, + { + "annotated NodePort services with ExternalTrafficPolicy=Local and multiple pods on a single node return an endpoint with unique IP addresses of the cluster's nodes where pods is running only", + "", + "", + "testing", + "foo", + v1.ServiceTypeNodePort, + v1.ServiceExternalTrafficPolicyTypeLocal, + "", + "", + false, + map[string]string{}, + map[string]string{ + hostnameAnnotationKey: "foo.example.org.", + }, + nil, + []*endpoint.Endpoint{ + {DNSName: "_30192._tcp.foo.example.org", Targets: endpoint.Targets{"0 50 30192 foo.example.org"}, RecordType: endpoint.RecordTypeSRV}, + {DNSName: "foo.example.org", Targets: endpoint.Targets{"54.10.11.2"}, RecordType: endpoint.RecordTypeA}, + }, + false, + []*v1.Node{{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node1", + }, + Status: v1.NodeStatus{ + Addresses: []v1.NodeAddress{ + {Type: v1.NodeExternalIP, Address: "54.10.11.1"}, + {Type: v1.NodeInternalIP, Address: "10.0.1.1"}, + }, + }, + }, { + ObjectMeta: metav1.ObjectMeta{ + Name: "node2", + }, + Status: v1.NodeStatus{ + Addresses: []v1.NodeAddress{ + {Type: v1.NodeExternalIP, Address: "54.10.11.2"}, + {Type: v1.NodeInternalIP, Address: "10.0.1.2"}, + }, + }, + }}, + []string{"pod-0", "pod-1"}, + []int{1, 1}, + []v1.PodPhase{v1.PodRunning, v1.PodRunning}, + }, } { t.Run(tc.title, func(t *testing.T) { // Create a Kubernetes testing client From 5a8916ea221ef8b5f875b78fb0c7d173c62a3bba Mon Sep 17 00:00:00 2001 From: Sean Malloy Date: Fri, 28 Aug 2020 01:17:14 -0500 Subject: [PATCH 30/46] Update Contributing Documentation --- CONTRIBUTING.md | 28 
++++++++++++++-------------- README.md | 30 ++++++++++++++---------------- 2 files changed, 28 insertions(+), 30 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index f645c84a5..28f25107d 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,22 +1,22 @@ -# Contributing guidelines +# Contributing Guidelines -## How to become a contributor and submit your own code +Welcome to Kubernetes. We are excited about the prospect of you joining our [community](https://git.k8s.io/community)! The Kubernetes community abides by the CNCF [code of conduct](code-of-conduct.md). Here is an excerpt: -### Contributor License Agreements +_As contributors and maintainers of this project, and in the interest of fostering an open and welcoming community, we pledge to respect all people who contribute through reporting issues, posting feature requests, updating documentation, submitting pull requests or patches, and other activities._ -We'd love to accept your patches! Before we can take them, we have to jump a couple of legal hurdles. +## Getting Started -Please fill out either the individual or corporate Contributor License Agreement (CLA). +We have full documentation on how to get started contributing here: - * If you are an individual writing original source code and you're sure you own the intellectual property, then you'll need to sign an individual CLA. - * If you work for a company that wants to allow you to contribute your work, then you'll need to sign a corporate CLA. +- [Contributor License Agreement](https://git.k8s.io/community/CLA.md) Kubernetes projects require that you sign a Contributor License Agreement (CLA) before we can accept your pull requests +- [Kubernetes Contributor Guide](https://git.k8s.io/community/contributors/guide) - Main contributor documentation, or you can just jump directly to the [contributing section](https://git.k8s.io/community/contributors/guide#contributing) +- [Contributor Cheat Sheet](https://git.k8s.io/community/contributors/guide/contributor-cheatsheet) - Common resources for existing developers -To sign and submit a CLA, see the [CLA doc](https://git.k8s.io/community/CLA.md). +## Mentorship -### Contributing A Patch +- [Mentoring Initiatives](https://git.k8s.io/community/mentoring) - We have a diverse set of mentorship programs available that are always looking for volunteers! -1. Submit an issue describing your proposed change to the repo in question. -1. The [repo owners](OWNERS) will respond to your issue promptly. -1. If your proposed change is accepted, and you haven't already done so, sign a Contributor License Agreement (see details above). -1. Fork the desired repo, develop and test your code changes. -1. Submit a pull request. +## Contact Information + +- [Slack channel](https://kubernetes.slack.com/messages/external-dns) +- [Mailing list](https://groups.google.com/forum/#!forum/kubernetes-sig-network) diff --git a/README.md b/README.md index e5e93ee1e..36201d3f6 100644 --- a/README.md +++ b/README.md @@ -280,24 +280,26 @@ Have a look at [the milestones](https://github.com/kubernetes-sigs/external-dns/ ## Contributing -We encourage you to get involved with ExternalDNS, as users, contributors or as new maintainers that can take over some parts like different providers and help with code reviews. +Are you interested in contributing to external-dns? We, the maintainers and community, would love your +suggestions, contributions, and help! Also, the maintainers can be contacted at any time to learn more +about how to get involved. 
-Providers which currently need maintainers: +We also encourage ALL active community participants to act as if they are maintainers, even if you don't have +"official" write permissions. This is a community effort, we are here to serve the Kubernetes community. If you +have an active interest and you want to get involved, you have real power! Don't assume that the only people who +can get things done around here are the "maintainers". We also would love to add more "official" maintainers, so +show us what you can do! -* Azure -* Cloudflare -* Digital Ocean -* Google Cloud Platform +The external-dns project is currently in need of maintainers for specific DNS providers. Ideally each provider +would have at least two maintainers. It would be nice if the maintainers run the provider in production, but it +is not strictly required. Provider listed [here](https://github.com/kubernetes-sigs/external-dns#status-of-providers) +that do not have a maintainer listed are in need of assistance. -Any provider should have at least one maintainer. It would be nice if you run it in production, but it is not required. -You should check changes and make sure your provider is working correctly. - -It would be also great to have an automated end-to-end test for different cloud providers, so help from Kubernetes maintainers and their idea on how this can be done would be valuable. +The external-dns project is also in need of automated end-to-end tests for different DNS providers. Any help from the +Kubernetes community and ideas on how this can be accomplished would be valuable. Read the [contributing guidelines](CONTRIBUTING.md) and have a look at [the contributing docs](docs/contributing/getting-started.md) to learn about building the project, the project structure, and the purpose of each package. -If you are interested please reach out to us on the [Kubernetes slack](http://slack.k8s.io) in the #external-dns channel. - For an overview on how to write new Sources and Providers check out [Sources and Providers](docs/contributing/sources-and-providers.md). ## Heritage @@ -312,7 +314,3 @@ ExternalDNS is an effort to unify the following similar projects in order to bri * A full demo on GKE Kubernetes. See [How-to Kubernetes with DNS management (ssl-manager pre-req)](https://medium.com/@jpantjsoha/how-to-kubernetes-with-dns-management-for-gitops-31239ea75d8d) * Run external-dns on GKE with workload identity. See [Kubernetes, ingress-nginx, cert-manager & external-dns](https://blog.atomist.com/kubernetes-ingress-nginx-cert-manager-external-dns/) - -### Code of conduct - -Participation in the Kubernetes community is governed by the [Kubernetes Code of Conduct](code-of-conduct.md). 
From 428b305d765ce853cec41c98c32ea9e1abef32b1 Mon Sep 17 00:00:00 2001 From: Sean Malloy Date: Tue, 1 Sep 2020 09:01:16 -0500 Subject: [PATCH 31/46] Bump CHANGELOG --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index fba5cd904..23e27b631 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,6 +14,7 @@ - Fixes test coverage with coveralls (#1755) @jgrumboe - Add tutorial for GKE with workload identity (#1765) @ddgenome - Fix NodePort with externaltrafficpolicy targets duplication @codearky +- Update contributing section in README (#1760) @seanmalloy ## v0.7.3 - 2020-08-05 From 153131041f7910646d32e42d12bea7629f229ef1 Mon Sep 17 00:00:00 2001 From: Sean Malloy Date: Wed, 2 Sep 2020 08:58:19 -0500 Subject: [PATCH 32/46] Remove test automation section from contributing docs --- README.md | 3 --- 1 file changed, 3 deletions(-) diff --git a/README.md b/README.md index 36201d3f6..853f86c60 100644 --- a/README.md +++ b/README.md @@ -295,9 +295,6 @@ would have at least two maintainers. It would be nice if the maintainers run the is not strictly required. Provider listed [here](https://github.com/kubernetes-sigs/external-dns#status-of-providers) that do not have a maintainer listed are in need of assistance. -The external-dns project is also in need of automated end-to-end tests for different DNS providers. Any help from the -Kubernetes community and ideas on how this can be accomplished would be valuable. - Read the [contributing guidelines](CONTRIBUTING.md) and have a look at [the contributing docs](docs/contributing/getting-started.md) to learn about building the project, the project structure, and the purpose of each package. For an overview on how to write new Sources and Providers check out [Sources and Providers](docs/contributing/sources-and-providers.md). 
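As background for the Sources and Providers document referenced just above: the two extension points reduce to a pair of small Go interfaces. The sketch below is only a simplified illustration, with placeholder `Endpoint` and `Changes` types standing in for the real `endpoint` and `plan` packages; it is not the exact upstream definition.

```go
package main

import "context"

// Endpoint and Changes are minimal stand-ins for the real endpoint.Endpoint
// and plan.Changes types used by external-dns.
type Endpoint struct {
	DNSName    string
	Targets    []string
	RecordType string
}

type Changes struct {
	Create, UpdateOld, UpdateNew, Delete []*Endpoint
}

// Source is the rough shape of a records source: it turns Kubernetes objects
// (Services, Ingresses, ...) into the endpoints that should exist.
type Source interface {
	Endpoints() ([]*Endpoint, error)
}

// Provider is the rough shape of a DNS provider: it lists the records that
// currently exist and applies a computed set of changes against the DNS API.
type Provider interface {
	Records(ctx context.Context) ([]*Endpoint, error)
	ApplyChanges(ctx context.Context, changes *Changes) error
}

func main() {}
```

A new provider plugs in by implementing the `Provider` side of this pair and registering itself under a name in `main.go`.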
From dac21e3aff65e5b5c9d0682fc55bf848e898fb9c Mon Sep 17 00:00:00 2001 From: Thibault Cohen Date: Fri, 7 Jun 2019 11:46:30 -0400 Subject: [PATCH 33/46] Add --zone-name-filter option for azure provider --- main.go | 3 ++- pkg/apis/externaldns/types.go | 2 ++ pkg/apis/externaldns/types_test.go | 5 +++++ provider/azure/azure.go | 20 +++++++++++++++++++- 4 files changed, 28 insertions(+), 2 deletions(-) diff --git a/main.go b/main.go index ecbda90d7..03a7f94e8 100644 --- a/main.go +++ b/main.go @@ -141,6 +141,7 @@ func main() { endpointsSource := source.NewDedupSource(source.NewMultiSource(sources)) domainFilter := endpoint.NewDomainFilterWithExclusions(cfg.DomainFilter, cfg.ExcludeDomains) + zoneNameFilter := endpoint.NewDomainFilter(cfg.ZoneNameFilter) zoneIDFilter := provider.NewZoneIDFilter(cfg.ZoneIDFilter) zoneTypeFilter := provider.NewZoneTypeFilter(cfg.AWSZoneType) zoneTagFilter := provider.NewZoneTagFilter(cfg.AWSZoneTagFilter) @@ -185,7 +186,7 @@ func main() { } p, err = awssd.NewAWSSDProvider(domainFilter, cfg.AWSZoneType, cfg.AWSAssumeRole, cfg.DryRun) case "azure-dns", "azure": - p, err = azure.NewAzureProvider(cfg.AzureConfigFile, domainFilter, zoneIDFilter, cfg.AzureResourceGroup, cfg.AzureUserAssignedIdentityClientID, cfg.DryRun) + p, err = azure.NewAzureProvider(cfg.AzureConfigFile, domainFilter, zoneNameFilter, zoneIDFilter, cfg.AzureResourceGroup, cfg.AzureUserAssignedIdentityClientID, cfg.DryRun) case "azure-private-dns": p, err = azure.NewAzurePrivateDNSProvider(domainFilter, zoneIDFilter, cfg.AzureResourceGroup, cfg.AzureSubscriptionID, cfg.DryRun) case "vinyldns": diff --git a/pkg/apis/externaldns/types.go b/pkg/apis/externaldns/types.go index 1476b4685..bb75044fe 100644 --- a/pkg/apis/externaldns/types.go +++ b/pkg/apis/externaldns/types.go @@ -61,6 +61,7 @@ type Config struct { GoogleBatchChangeInterval time.Duration DomainFilter []string ExcludeDomains []string + ZoneNameFilter []string ZoneIDFilter []string AlibabaCloudConfigFile string AlibabaCloudZoneType string @@ -321,6 +322,7 @@ func (cfg *Config) ParseFlags(args []string) error { app.Flag("provider", "The DNS provider where the DNS records will be created (required, options: aws, aws-sd, google, azure, azure-dns, azure-private-dns, cloudflare, rcodezero, digitalocean, hetzner, dnsimple, akamai, infoblox, dyn, designate, coredns, skydns, inmemory, ovh, pdns, oci, exoscale, linode, rfc2136, ns1, transip, vinyldns, rdns, scaleway, vultr, ultradns)").Required().PlaceHolder("provider").EnumVar(&cfg.Provider, "aws", "aws-sd", "google", "azure", "azure-dns", "hetzner", "azure-private-dns", "alibabacloud", "cloudflare", "rcodezero", "digitalocean", "dnsimple", "akamai", "infoblox", "dyn", "designate", "coredns", "skydns", "inmemory", "ovh", "pdns", "oci", "exoscale", "linode", "rfc2136", "ns1", "transip", "vinyldns", "rdns", "scaleway", "vultr", "ultradns") app.Flag("domain-filter", "Limit possible target zones by a domain suffix; specify multiple times for multiple domains (optional)").Default("").StringsVar(&cfg.DomainFilter) app.Flag("exclude-domains", "Exclude subdomains (optional)").Default("").StringsVar(&cfg.ExcludeDomains) + app.Flag("zone-name-filter", "Filter target zones by zone domain (For now, only AzureDNS provider is using this flag); specify multiple times for multiple zones (optional)").Default("").StringsVar(&cfg.ZoneNameFilter) app.Flag("zone-id-filter", "Filter target zones by hosted zone id; specify multiple times for multiple zones (optional)").Default("").StringsVar(&cfg.ZoneIDFilter) 
app.Flag("google-project", "When using the Google provider, current project is auto-detected, when running on GCP. Specify other project with this. Must be specified when running outside GCP.").Default(defaultConfig.GoogleProject).StringVar(&cfg.GoogleProject) app.Flag("google-batch-change-size", "When using the Google provider, set the maximum number of changes that will be applied in each batch.").Default(strconv.Itoa(defaultConfig.GoogleBatchChangeSize)).IntVar(&cfg.GoogleBatchChangeSize) diff --git a/pkg/apis/externaldns/types_test.go b/pkg/apis/externaldns/types_test.go index b6a55ae86..e472da906 100644 --- a/pkg/apis/externaldns/types_test.go +++ b/pkg/apis/externaldns/types_test.go @@ -44,6 +44,7 @@ var ( GoogleBatchChangeInterval: time.Second, DomainFilter: []string{""}, ExcludeDomains: []string{""}, + ZoneNameFilter: []string{""}, ZoneIDFilter: []string{""}, AlibabaCloudConfigFile: "/etc/kubernetes/alibaba-cloud.json", AWSZoneType: "", @@ -119,6 +120,7 @@ var ( GoogleBatchChangeInterval: time.Second * 2, DomainFilter: []string{"example.org", "company.com"}, ExcludeDomains: []string{"xapi.example.org", "xapi.company.com"}, + ZoneNameFilter: []string{"yapi.example.org", "yapi.company.com"}, ZoneIDFilter: []string{"/hostedzone/ZTST1", "/hostedzone/ZTST2"}, AlibabaCloudConfigFile: "/etc/kubernetes/alibaba-cloud.json", AWSZoneType: "private", @@ -252,6 +254,8 @@ func TestParseFlags(t *testing.T) { "--domain-filter=company.com", "--exclude-domains=xapi.example.org", "--exclude-domains=xapi.company.com", + "--zone-name-filter=yapi.example.org", + "--zone-name-filter=yapi.company.com", "--zone-id-filter=/hostedzone/ZTST1", "--zone-id-filter=/hostedzone/ZTST2", "--aws-zone-type=private", @@ -339,6 +343,7 @@ func TestParseFlags(t *testing.T) { "EXTERNAL_DNS_TLS_CA": "/path/to/ca.crt", "EXTERNAL_DNS_TLS_CLIENT_CERT": "/path/to/cert.pem", "EXTERNAL_DNS_TLS_CLIENT_CERT_KEY": "/path/to/key.pem", + "EXTERNAL_DNS_ZONE_NAME_FILTER": "yapi.example.org\nyapi.company.com", "EXTERNAL_DNS_ZONE_ID_FILTER": "/hostedzone/ZTST1\n/hostedzone/ZTST2", "EXTERNAL_DNS_AWS_ZONE_TYPE": "private", "EXTERNAL_DNS_AWS_ZONE_TAGS": "tag=foo", diff --git a/provider/azure/azure.go b/provider/azure/azure.go index d216f5dde..0584bd0c2 100644 --- a/provider/azure/azure.go +++ b/provider/azure/azure.go @@ -69,6 +69,7 @@ type RecordSetsClient interface { type AzureProvider struct { provider.BaseProvider domainFilter endpoint.DomainFilter + zoneNameFilter endpoint.DomainFilter zoneIDFilter provider.ZoneIDFilter dryRun bool resourceGroup string @@ -80,7 +81,7 @@ type AzureProvider struct { // NewAzureProvider creates a new Azure provider. // // Returns the provider or an error if a provider could not be created. 
-func NewAzureProvider(configFile string, domainFilter endpoint.DomainFilter, zoneIDFilter provider.ZoneIDFilter, resourceGroup string, userAssignedIdentityClientID string, dryRun bool) (*AzureProvider, error) { +func NewAzureProvider(configFile string, domainFilter endpoint.DomainFilter, zoneNameFilter endpoint.DomainFilter, zoneIDFilter provider.ZoneIDFilter, resourceGroup string, userAssignedIdentityClientID string, dryRun bool) (*AzureProvider, error) { contents, err := ioutil.ReadFile(configFile) if err != nil { return nil, fmt.Errorf("failed to read Azure config file '%s': %v", configFile, err) @@ -122,6 +123,7 @@ func NewAzureProvider(configFile string, domainFilter endpoint.DomainFilter, zon provider := &AzureProvider{ domainFilter: domainFilter, + zoneNameFilter: zoneNameFilter, zoneIDFilter: zoneIDFilter, dryRun: dryRun, resourceGroup: cfg.ResourceGroup, @@ -205,6 +207,11 @@ func (p *AzureProvider) Records(ctx context.Context) (endpoints []*endpoint.Endp return true } name := formatAzureDNSName(*recordSet.Name, *zone.Name) + + if len(p.zoneNameFilter.Filters) > 0 && !p.domainFilter.Match(name) { + log.Debugf("Skipping return of record %s because it was filtered out by the specified --domain-filter", name) + return false + } targets := extractAzureTargets(&recordSet) if len(targets) == 0 { log.Errorf("Failed to extract targets for '%s' with type '%s'.", name, recordType) @@ -262,6 +269,9 @@ func (p *AzureProvider) zones(ctx context.Context) ([]dns.Zone, error) { if zone.Name != nil && p.domainFilter.Match(*zone.Name) && p.zoneIDFilter.Match(*zone.ID) { zones = append(zones, zone) + } else if zone.Name != nil && len(p.zoneNameFilter.Filters) > 0 && p.zoneNameFilter.Match(*zone.Name) { + // Handle zoneNameFilter + zones = append(zones, zone) } err := zonesIterator.NextWithContext(ctx) @@ -344,6 +354,10 @@ func (p *AzureProvider) deleteRecords(ctx context.Context, deleted azureChangeMa for zone, endpoints := range deleted { for _, endpoint := range endpoints { name := p.recordSetNameForZone(zone, endpoint) + if !p.domainFilter.Match(endpoint.DNSName) { + log.Debugf("Skipping deletion of record %s because it was filtered out by the specified --domain-filter", endpoint.DNSName) + continue + } if p.dryRun { log.Infof("Would delete %s record named '%s' for Azure DNS zone '%s'.", endpoint.RecordType, name, zone) } else { @@ -366,6 +380,10 @@ func (p *AzureProvider) updateRecords(ctx context.Context, updated azureChangeMa for zone, endpoints := range updated { for _, endpoint := range endpoints { name := p.recordSetNameForZone(zone, endpoint) + if !p.domainFilter.Match(endpoint.DNSName) { + log.Debugf("Skipping update of record %s because it was filtered out by the specified --domain-filter", endpoint.DNSName) + continue + } if p.dryRun { log.Infof( "Would update %s record named '%s' to '%s' for Azure DNS zone '%s'.", From 837d1ea248eef6c48a7a9db64b585ce8ec83c6af Mon Sep 17 00:00:00 2001 From: Benjamin Pineau Date: Tue, 4 Aug 2020 07:44:05 +0200 Subject: [PATCH 34/46] aws: cache zones list When it syncs AWS DNS with k8s cluster content (at `--interval`), external-dns submits two distinct Route53 API calls: * to fetch available zones (eg. for tag based zones discovery, or when zones are created after exernal-dns started), * to fetch relevant zones' resource records. Each call taxes the Route53 APIs calls budget (5 API calls per second per AWS account/region hard limit), increasing the probability of being throttled. 
Changing synchronization interval would mitigate those calls' impact, but at the cost of keeping stale records for a longer time. For most practical uses cases, zones list aren't expected to change frequently. Even less so when external-dns is provided an explicit, static zones set (`--zone-id-filter` rather than `--aws-zone-tags`). Using a zones list cache halves the number of Route53 read API calls. --- CHANGELOG.md | 1 + docs/tutorials/aws.md | 7 +++++ main.go | 1 + pkg/apis/externaldns/types.go | 3 ++ pkg/apis/externaldns/types_test.go | 48 ++++++++++++++++-------------- provider/aws/aws.go | 20 +++++++++++++ provider/aws/aws_test.go | 2 ++ 7 files changed, 60 insertions(+), 22 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 23e27b631..1c8517c51 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,6 +15,7 @@ - Add tutorial for GKE with workload identity (#1765) @ddgenome - Fix NodePort with externaltrafficpolicy targets duplication @codearky - Update contributing section in README (#1760) @seanmalloy +- Option to cache AWS zones list @bpineau ## v0.7.3 - 2020-08-05 diff --git a/docs/tutorials/aws.md b/docs/tutorials/aws.md index 579429d25..e708cafe2 100644 --- a/docs/tutorials/aws.md +++ b/docs/tutorials/aws.md @@ -420,3 +420,10 @@ Give ExternalDNS some time to clean up the DNS records for you. Then delete the ```console $ aws route53 delete-hosted-zone --id /hostedzone/ZEWFWZ4R16P7IB ``` + +## Throttling + +Route53 has a [5 API requests per second per account hard quota](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/DNSLimitations.html#limits-api-requests-route-53). +Running several fast polling ExternalDNS instances in a given account can easily hit that limit. Some ways to circumvent that issue includes: +* Augment the synchronization interval (`--interval`), at the cost of slower changes propagation. +* If the ExternalDNS managed zones list doesn't change frequently, set `--aws-zones-cache-duration` (zones list cache time-to-live) to a larger value. Note that zones list cache can be disabled with `--aws-zones-cache-duration=0s`. 
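The caching behind the new `--aws-zones-cache-duration` flag amounts to a small time-bounded memoization of the Route53 zones listing. Below is a minimal, self-contained Go sketch of that pattern; identifiers are simplified for illustration, while the provider's actual cache (shown in the diff further down) stores a `map[string]*route53.HostedZone`.

```go
package main

import (
	"fmt"
	"time"
)

// zonesCache is a simplified stand-in for the provider's zones list cache:
// it remembers the last listing together with its age and a configurable TTL.
type zonesCache struct {
	age      time.Time
	duration time.Duration // 0 disables caching
	zones    map[string]string
}

// get returns the cached listing while it is still fresh; otherwise it calls
// list() (one Route53 round trip in the real provider) and, if caching is
// enabled, refreshes the cache.
func (c *zonesCache) get(list func() map[string]string) map[string]string {
	if c.zones != nil && time.Since(c.age) < c.duration {
		return c.zones // fresh enough, no API call needed
	}
	zones := list()
	if c.duration > 0 {
		c.zones = zones
		c.age = time.Now()
	}
	return zones
}

func main() {
	calls := 0
	list := func() map[string]string {
		calls++
		return map[string]string{"Z123": "external-dns-test.example.org."}
	}

	c := &zonesCache{duration: 3 * time.Hour}
	c.get(list)
	c.get(list)                      // second sync is served from the cache
	fmt.Println("API calls:", calls) // prints: API calls: 1
}
```

A zero duration keeps the previous behaviour of listing zones on every sync; any positive duration trades some staleness of the zones set for fewer Route53 API calls.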
diff --git a/main.go b/main.go index ecbda90d7..1a02976a2 100644 --- a/main.go +++ b/main.go @@ -175,6 +175,7 @@ func main() { APIRetries: cfg.AWSAPIRetries, PreferCNAME: cfg.AWSPreferCNAME, DryRun: cfg.DryRun, + ZoneCacheDuration: cfg.AWSZoneCacheDuration, }, ) case "aws-sd": diff --git a/pkg/apis/externaldns/types.go b/pkg/apis/externaldns/types.go index 1476b4685..0fa84b130 100644 --- a/pkg/apis/externaldns/types.go +++ b/pkg/apis/externaldns/types.go @@ -72,6 +72,7 @@ type Config struct { AWSEvaluateTargetHealth bool AWSAPIRetries int AWSPreferCNAME bool + AWSZoneCacheDuration time.Duration AzureConfigFile string AzureResourceGroup string AzureSubscriptionID string @@ -176,6 +177,7 @@ var defaultConfig = &Config{ AWSEvaluateTargetHealth: true, AWSAPIRetries: 3, AWSPreferCNAME: false, + AWSZoneCacheDuration: 0 * time.Second, AzureConfigFile: "/etc/kubernetes/azure.json", AzureResourceGroup: "", AzureSubscriptionID: "", @@ -335,6 +337,7 @@ func (cfg *Config) ParseFlags(args []string) error { app.Flag("aws-evaluate-target-health", "When using the AWS provider, set whether to evaluate the health of a DNS target (default: enabled, disable with --no-aws-evaluate-target-health)").Default(strconv.FormatBool(defaultConfig.AWSEvaluateTargetHealth)).BoolVar(&cfg.AWSEvaluateTargetHealth) app.Flag("aws-api-retries", "When using the AWS provider, set the maximum number of retries for API calls before giving up.").Default(strconv.Itoa(defaultConfig.AWSAPIRetries)).IntVar(&cfg.AWSAPIRetries) app.Flag("aws-prefer-cname", "When using the AWS provider, prefer using CNAME instead of ALIAS (default: disabled)").BoolVar(&cfg.AWSPreferCNAME) + app.Flag("aws-zones-cache-duration", "When using the AWS provider, set the zones list cache TTL (0s to disable).").Default(defaultConfig.AWSZoneCacheDuration.String()).DurationVar(&cfg.AWSZoneCacheDuration) app.Flag("azure-config-file", "When using the Azure provider, specify the Azure configuration file (required when --provider=azure").Default(defaultConfig.AzureConfigFile).StringVar(&cfg.AzureConfigFile) app.Flag("azure-resource-group", "When using the Azure provider, override the Azure resource group to use (required when --provider=azure-private-dns)").Default(defaultConfig.AzureResourceGroup).StringVar(&cfg.AzureResourceGroup) app.Flag("azure-subscription-id", "When using the Azure provider, specify the Azure configuration file (required when --provider=azure-private-dns)").Default(defaultConfig.AzureSubscriptionID).StringVar(&cfg.AzureSubscriptionID) diff --git a/pkg/apis/externaldns/types_test.go b/pkg/apis/externaldns/types_test.go index b6a55ae86..14c6a1e07 100644 --- a/pkg/apis/externaldns/types_test.go +++ b/pkg/apis/externaldns/types_test.go @@ -29,17 +29,17 @@ import ( var ( minimalConfig = &Config{ - APIServerURL: "", - KubeConfig: "", - RequestTimeout: time.Second * 30, - ContourLoadBalancerService: "heptio-contour/contour", - SkipperRouteGroupVersion: "zalando.org/v1", - Sources: []string{"service"}, - Namespace: "", - FQDNTemplate: "", - Compatibility: "", - Provider: "google", - GoogleProject: "", + APIServerURL: "", + KubeConfig: "", + RequestTimeout: time.Second * 30, + ContourLoadBalancerService: "heptio-contour/contour", + SkipperRouteGroupVersion: "zalando.org/v1", + Sources: []string{"service"}, + Namespace: "", + FQDNTemplate: "", + Compatibility: "", + Provider: "google", + GoogleProject: "", GoogleBatchChangeSize: 1000, GoogleBatchChangeInterval: time.Second, DomainFilter: []string{""}, @@ -54,6 +54,7 @@ var ( AWSEvaluateTargetHealth: true, 
AWSAPIRetries: 3, AWSPreferCNAME: false, + AWSZoneCacheDuration: 0 * time.Second, AzureConfigFile: "/etc/kubernetes/azure.json", AzureResourceGroup: "", AzureSubscriptionID: "", @@ -103,17 +104,17 @@ var ( } overriddenConfig = &Config{ - APIServerURL: "http://127.0.0.1:8080", - KubeConfig: "/some/path", - RequestTimeout: time.Second * 77, - ContourLoadBalancerService: "heptio-contour-other/contour-other", - SkipperRouteGroupVersion: "zalando.org/v2", - Sources: []string{"service", "ingress", "connector"}, - Namespace: "namespace", - IgnoreHostnameAnnotation: true, - FQDNTemplate: "{{.Name}}.service.example.com", - Compatibility: "mate", - Provider: "google", + APIServerURL: "http://127.0.0.1:8080", + KubeConfig: "/some/path", + RequestTimeout: time.Second * 77, + ContourLoadBalancerService: "heptio-contour-other/contour-other", + SkipperRouteGroupVersion: "zalando.org/v2", + Sources: []string{"service", "ingress", "connector"}, + Namespace: "namespace", + IgnoreHostnameAnnotation: true, + FQDNTemplate: "{{.Name}}.service.example.com", + Compatibility: "mate", + Provider: "google", GoogleProject: "project", GoogleBatchChangeSize: 100, GoogleBatchChangeInterval: time.Second * 2, @@ -129,6 +130,7 @@ var ( AWSEvaluateTargetHealth: false, AWSAPIRetries: 13, AWSPreferCNAME: true, + AWSZoneCacheDuration: 10 * time.Second, AzureConfigFile: "azure.json", AzureResourceGroup: "arg", AzureSubscriptionID: "arg", @@ -261,6 +263,7 @@ func TestParseFlags(t *testing.T) { "--aws-batch-change-interval=2s", "--aws-api-retries=13", "--aws-prefer-cname", + "--aws-zones-cache-duration=10s", "--no-aws-evaluate-target-health", "--policy=upsert-only", "--registry=noop", @@ -348,6 +351,7 @@ func TestParseFlags(t *testing.T) { "EXTERNAL_DNS_AWS_EVALUATE_TARGET_HEALTH": "0", "EXTERNAL_DNS_AWS_API_RETRIES": "13", "EXTERNAL_DNS_AWS_PREFER_CNAME": "true", + "EXTERNAL_DNS_AWS_ZONES_CACHE_DURATION": "10s", "EXTERNAL_DNS_POLICY": "upsert-only", "EXTERNAL_DNS_REGISTRY": "noop", "EXTERNAL_DNS_TXT_OWNER_ID": "owner-1", diff --git a/provider/aws/aws.go b/provider/aws/aws.go index 8108c463f..22d6f2a6f 100644 --- a/provider/aws/aws.go +++ b/provider/aws/aws.go @@ -117,6 +117,12 @@ type Route53API interface { ListTagsForResourceWithContext(ctx context.Context, input *route53.ListTagsForResourceInput, opts ...request.Option) (*route53.ListTagsForResourceOutput, error) } +type zonesListCache struct { + age time.Time + duration time.Duration + zones map[string]*route53.HostedZone +} + // AWSProvider is an implementation of Provider for AWS Route53. type AWSProvider struct { provider.BaseProvider @@ -134,6 +140,7 @@ type AWSProvider struct { // filter hosted zones by tags zoneTagFilter provider.ZoneTagFilter preferCNAME bool + zonesCache *zonesListCache } // AWSConfig contains configuration to create a new AWS provider. @@ -149,6 +156,7 @@ type AWSConfig struct { APIRetries int PreferCNAME bool DryRun bool + ZoneCacheDuration time.Duration } // NewAWSProvider initializes a new AWS Route53 based Provider. @@ -188,6 +196,7 @@ func NewAWSProvider(awsConfig AWSConfig) (*AWSProvider, error) { evaluateTargetHealth: awsConfig.EvaluateTargetHealth, preferCNAME: awsConfig.PreferCNAME, dryRun: awsConfig.DryRun, + zonesCache: &zonesListCache{duration: awsConfig.ZoneCacheDuration}, } return provider, nil @@ -195,6 +204,12 @@ func NewAWSProvider(awsConfig AWSConfig) (*AWSProvider, error) { // Zones returns the list of hosted zones. 
func (p *AWSProvider) Zones(ctx context.Context) (map[string]*route53.HostedZone, error) { + if p.zonesCache.zones != nil && time.Since(p.zonesCache.age) < p.zonesCache.duration { + log.Debug("Using cached zones list") + return p.zonesCache.zones, nil + } + log.Debug("Refreshing zones list cache") + zones := make(map[string]*route53.HostedZone) var tagErr error @@ -242,6 +257,11 @@ func (p *AWSProvider) Zones(ctx context.Context) (map[string]*route53.HostedZone log.Debugf("Considering zone: %s (domain: %s)", aws.StringValue(zone.Id), aws.StringValue(zone.Name)) } + if p.zonesCache.duration > time.Duration(0) { + p.zonesCache.zones = zones + p.zonesCache.age = time.Now() + } + return zones, nil } diff --git a/provider/aws/aws_test.go b/provider/aws/aws_test.go index aaa32741f..3de6cff1e 100644 --- a/provider/aws/aws_test.go +++ b/provider/aws/aws_test.go @@ -500,6 +500,7 @@ func TestAWSApplyChanges(t *testing.T) { ctx := tt.setup(provider) + provider.zonesCache = &zonesListCache{duration: 0 * time.Minute} counter := NewRoute53APICounter(provider.client) provider.client = counter require.NoError(t, provider.ApplyChanges(ctx, changes)) @@ -1200,6 +1201,7 @@ func newAWSProviderWithTagFilter(t *testing.T, domainFilter endpoint.DomainFilte zoneTypeFilter: zoneTypeFilter, zoneTagFilter: zoneTagFilter, dryRun: false, + zonesCache: &zonesListCache{duration: 1 * time.Minute}, } createAWSZone(t, provider, &route53.HostedZone{ From a706ba32ab173bf9839d6040d5a6973a57384a0d Mon Sep 17 00:00:00 2001 From: Thibault Cohen Date: Tue, 8 Sep 2020 11:18:04 -0400 Subject: [PATCH 35/46] Add test for ZoneNameFilter --- provider/azure/azure.go | 2 +- provider/azure/azure_test.go | 147 +++++++++++++++++++++++++++++++++-- 2 files changed, 143 insertions(+), 6 deletions(-) diff --git a/provider/azure/azure.go b/provider/azure/azure.go index 0584bd0c2..1c82b0136 100644 --- a/provider/azure/azure.go +++ b/provider/azure/azure.go @@ -210,7 +210,7 @@ func (p *AzureProvider) Records(ctx context.Context) (endpoints []*endpoint.Endp if len(p.zoneNameFilter.Filters) > 0 && !p.domainFilter.Match(name) { log.Debugf("Skipping return of record %s because it was filtered out by the specified --domain-filter", name) - return false + return true } targets := extractAzureTargets(&recordSet) if len(targets) == 0 { diff --git a/provider/azure/azure_test.go b/provider/azure/azure_test.go index 30a21f2d6..fa5e2341d 100644 --- a/provider/azure/azure_test.go +++ b/provider/azure/azure_test.go @@ -207,7 +207,7 @@ func (client *mockRecordSetsClient) CreateOrUpdate(ctx context.Context, resource } // newMockedAzureProvider creates an AzureProvider comprising the mocked clients for zones and recordsets -func newMockedAzureProvider(domainFilter endpoint.DomainFilter, zoneIDFilter provider.ZoneIDFilter, dryRun bool, resourceGroup string, userAssignedIdentityClientID string, zones *[]dns.Zone, recordSets *[]dns.RecordSet) (*AzureProvider, error) { +func newMockedAzureProvider(domainFilter endpoint.DomainFilter, zoneNameFilter endpoint.DomainFilter, zoneIDFilter provider.ZoneIDFilter, dryRun bool, resourceGroup string, userAssignedIdentityClientID string, zones *[]dns.Zone, recordSets *[]dns.RecordSet) (*AzureProvider, error) { // init zone-related parts of the mock-client pageIterator := mockZoneListResultPageIterator{ results: []dns.ZoneListResult{ @@ -237,12 +237,13 @@ func newMockedAzureProvider(domainFilter endpoint.DomainFilter, zoneIDFilter pro mockRecordSetListIterator: &mockRecordSetListIterator, } - return newAzureProvider(domainFilter, 
zoneIDFilter, dryRun, resourceGroup, userAssignedIdentityClientID, &zonesClient, &recordSetsClient), nil + return newAzureProvider(domainFilter, zoneNameFilter, zoneIDFilter, dryRun, resourceGroup, userAssignedIdentityClientID, &zonesClient, &recordSetsClient), nil } -func newAzureProvider(domainFilter endpoint.DomainFilter, zoneIDFilter provider.ZoneIDFilter, dryRun bool, resourceGroup string, userAssignedIdentityClientID string, zonesClient ZonesClient, recordsClient RecordSetsClient) *AzureProvider { +func newAzureProvider(domainFilter endpoint.DomainFilter, zoneNameFilter endpoint.DomainFilter, zoneIDFilter provider.ZoneIDFilter, dryRun bool, resourceGroup string, userAssignedIdentityClientID string, zonesClient ZonesClient, recordsClient RecordSetsClient) *AzureProvider { return &AzureProvider{ domainFilter: domainFilter, + zoneNameFilter: zoneNameFilter, zoneIDFilter: zoneIDFilter, dryRun: dryRun, resourceGroup: resourceGroup, @@ -257,7 +258,7 @@ func validateAzureEndpoints(t *testing.T, endpoints []*endpoint.Endpoint, expect } func TestAzureRecord(t *testing.T) { - provider, err := newMockedAzureProvider(endpoint.NewDomainFilter([]string{"example.com"}), provider.NewZoneIDFilter([]string{""}), true, "k8s", "", + provider, err := newMockedAzureProvider(endpoint.NewDomainFilter([]string{"example.com"}), endpoint.NewDomainFilter([]string{}), provider.NewZoneIDFilter([]string{""}), true, "k8s", "", &[]dns.Zone{ createMockZone("example.com", "/dnszones/example.com"), }, @@ -294,7 +295,7 @@ func TestAzureRecord(t *testing.T) { } func TestAzureMultiRecord(t *testing.T) { - provider, err := newMockedAzureProvider(endpoint.NewDomainFilter([]string{"example.com"}), provider.NewZoneIDFilter([]string{""}), true, "k8s", "", + provider, err := newMockedAzureProvider(endpoint.NewDomainFilter([]string{"example.com"}), endpoint.NewDomainFilter([]string{}), provider.NewZoneIDFilter([]string{""}), true, "k8s", "", &[]dns.Zone{ createMockZone("example.com", "/dnszones/example.com"), }, @@ -393,6 +394,7 @@ func testAzureApplyChangesInternal(t *testing.T, dryRun bool, client RecordSetsC } provider := newAzureProvider( + endpoint.NewDomainFilter([]string{""}), endpoint.NewDomainFilter([]string{""}), provider.NewZoneIDFilter([]string{""}), dryRun, @@ -496,3 +498,138 @@ func TestAzureGetAccessToken(t *testing.T) { t.Fatalf("expect the clientID of the token is SPNClientID, but got token %s", string(innerToken)) } } + +func TestAzureNameFilter(t *testing.T) { + provider, err := newMockedAzureProvider(endpoint.NewDomainFilter([]string{"nginx.example.com"}), endpoint.NewDomainFilter([]string{"example.com"}), provider.NewZoneIDFilter([]string{""}), true, "k8s", "", + &[]dns.Zone{ + createMockZone("example.com", "/dnszones/example.com"), + }, + + &[]dns.RecordSet{ + createMockRecordSet("@", "NS", "ns1-03.azure-dns.com."), + createMockRecordSet("@", "SOA", "Email: azuredns-hostmaster.microsoft.com"), + createMockRecordSet("@", endpoint.RecordTypeA, "123.123.123.122"), + createMockRecordSet("@", endpoint.RecordTypeTXT, "heritage=external-dns,external-dns/owner=default"), + createMockRecordSetWithTTL("test.nginx", endpoint.RecordTypeA, "123.123.123.123", 3600), + createMockRecordSetWithTTL("nginx", endpoint.RecordTypeA, "123.123.123.123", 3600), + createMockRecordSetWithTTL("nginx", endpoint.RecordTypeTXT, "heritage=external-dns,external-dns/owner=default", recordTTL), + createMockRecordSetWithTTL("hack", endpoint.RecordTypeCNAME, "hack.azurewebsites.net", 10), + }) + + if err != nil { + t.Fatal(err) + } + + ctx := 
context.Background() + actual, err := provider.Records(ctx) + + if err != nil { + t.Fatal(err) + } + expected := []*endpoint.Endpoint{ + endpoint.NewEndpointWithTTL("test.nginx.example.com", endpoint.RecordTypeA, 3600, "123.123.123.123"), + endpoint.NewEndpointWithTTL("nginx.example.com", endpoint.RecordTypeA, 3600, "123.123.123.123"), + endpoint.NewEndpointWithTTL("nginx.example.com", endpoint.RecordTypeTXT, recordTTL, "heritage=external-dns,external-dns/owner=default"), + } + + validateAzureEndpoints(t, actual, expected) + +} + +func TestAzureApplyChangesZoneName(t *testing.T) { + recordsClient := mockRecordSetsClient{} + + testAzureApplyChangesInternalZoneName(t, false, &recordsClient) + + validateAzureEndpoints(t, recordsClient.deletedEndpoints, []*endpoint.Endpoint{ + endpoint.NewEndpoint("old.foo.example.com", endpoint.RecordTypeA, ""), + endpoint.NewEndpoint("oldcname.foo.example.com", endpoint.RecordTypeCNAME, ""), + endpoint.NewEndpoint("deleted.foo.example.com", endpoint.RecordTypeA, ""), + endpoint.NewEndpoint("deletedcname.foo.example.com", endpoint.RecordTypeCNAME, ""), + }) + + validateAzureEndpoints(t, recordsClient.updatedEndpoints, []*endpoint.Endpoint{ + endpoint.NewEndpointWithTTL("foo.example.com", endpoint.RecordTypeA, endpoint.TTL(recordTTL), "1.2.3.4", "1.2.3.5"), + endpoint.NewEndpointWithTTL("foo.example.com", endpoint.RecordTypeTXT, endpoint.TTL(recordTTL), "tag"), + endpoint.NewEndpointWithTTL("new.foo.example.com", endpoint.RecordTypeA, 3600, "111.222.111.222"), + endpoint.NewEndpointWithTTL("newcname.foo.example.com", endpoint.RecordTypeCNAME, 10, "other.com"), + }) +} + +func testAzureApplyChangesInternalZoneName(t *testing.T, dryRun bool, client RecordSetsClient) { + zlr := dns.ZoneListResult{ + Value: &[]dns.Zone{ + createMockZone("example.com", "/dnszones/example.com"), + }, + } + + results := []dns.ZoneListResult{ + zlr, + } + + mockZoneListResultPage := dns.NewZoneListResultPage(func(ctxParam context.Context, zlrParam dns.ZoneListResult) (dns.ZoneListResult, error) { + if len(results) > 0 { + result := results[0] + results = nil + return result, nil + } + return dns.ZoneListResult{}, nil + }) + mockZoneClientIterator := dns.NewZoneListResultIterator(mockZoneListResultPage) + + zonesClient := mockZonesClient{ + mockZonesClientIterator: &mockZoneClientIterator, + } + + provider := newAzureProvider( + endpoint.NewDomainFilter([]string{"foo.example.com"}), + endpoint.NewDomainFilter([]string{"example.com"}), + provider.NewZoneIDFilter([]string{""}), + dryRun, + "group", + "", + &zonesClient, + client, + ) + + createRecords := []*endpoint.Endpoint{ + endpoint.NewEndpoint("example.com", endpoint.RecordTypeA, "1.2.3.4"), + endpoint.NewEndpoint("example.com", endpoint.RecordTypeTXT, "tag"), + endpoint.NewEndpoint("foo.example.com", endpoint.RecordTypeA, "1.2.3.5", "1.2.3.4"), + endpoint.NewEndpoint("foo.example.com", endpoint.RecordTypeTXT, "tag"), + endpoint.NewEndpoint("bar.example.com", endpoint.RecordTypeCNAME, "other.com"), + endpoint.NewEndpoint("bar.example.com", endpoint.RecordTypeTXT, "tag"), + endpoint.NewEndpoint("other.com", endpoint.RecordTypeA, "5.6.7.8"), + endpoint.NewEndpoint("other.com", endpoint.RecordTypeTXT, "tag"), + endpoint.NewEndpoint("nope.com", endpoint.RecordTypeA, "4.4.4.4"), + endpoint.NewEndpoint("nope.com", endpoint.RecordTypeTXT, "tag"), + } + + currentRecords := []*endpoint.Endpoint{ + endpoint.NewEndpoint("old.foo.example.com", endpoint.RecordTypeA, "121.212.121.212"), + endpoint.NewEndpoint("oldcname.foo.example.com", 
endpoint.RecordTypeCNAME, "other.com"), + endpoint.NewEndpoint("old.nope.example.com", endpoint.RecordTypeA, "121.212.121.212"), + } + updatedRecords := []*endpoint.Endpoint{ + endpoint.NewEndpointWithTTL("new.foo.example.com", endpoint.RecordTypeA, 3600, "111.222.111.222"), + endpoint.NewEndpointWithTTL("newcname.foo.example.com", endpoint.RecordTypeCNAME, 10, "other.com"), + endpoint.NewEndpoint("new.nope.example.com", endpoint.RecordTypeA, "222.111.222.111"), + } + + deleteRecords := []*endpoint.Endpoint{ + endpoint.NewEndpoint("deleted.foo.example.com", endpoint.RecordTypeA, "111.222.111.222"), + endpoint.NewEndpoint("deletedcname.foo.example.com", endpoint.RecordTypeCNAME, "other.com"), + endpoint.NewEndpoint("deleted.nope.example.com", endpoint.RecordTypeA, "222.111.222.111"), + } + + changes := &plan.Changes{ + Create: createRecords, + UpdateNew: updatedRecords, + UpdateOld: currentRecords, + Delete: deleteRecords, + } + + if err := provider.ApplyChanges(context.Background(), changes); err != nil { + t.Fatal(err) + } +} From 151259c7ffad06efaabd0b83a88107addd1d840e Mon Sep 17 00:00:00 2001 From: Michael Goodness Date: Wed, 9 Sep 2020 12:30:06 -0500 Subject: [PATCH 36/46] chore(kustomize): consolidate RBAC Signed-off-by: Michael Goodness --- kustomize/external-dns-clusterrole.yaml | 24 +++++++------------ .../external-dns-clusterrolebinding.yaml | 6 ++--- 2 files changed, 12 insertions(+), 18 deletions(-) diff --git a/kustomize/external-dns-clusterrole.yaml b/kustomize/external-dns-clusterrole.yaml index 6dfc98c4e..3c2490aaf 100644 --- a/kustomize/external-dns-clusterrole.yaml +++ b/kustomize/external-dns-clusterrole.yaml @@ -3,18 +3,12 @@ kind: ClusterRole metadata: name: external-dns rules: -- apiGroups: [""] - resources: ["services"] - verbs: ["get","watch","list"] -- apiGroups: [""] - resources: ["pods"] - verbs: ["get","watch","list"] -- apiGroups: ["extensions"] - resources: ["ingresses"] - verbs: ["get","watch","list"] -- apiGroups: [""] - resources: ["nodes"] - verbs: ["list"] -- apiGroups: [""] - resources: ["endpoints"] - verbs: ["get","watch","list"] + - apiGroups: [''] + resources: ['endpoints', 'pods', 'services'] + verbs: ['get', 'watch', 'list'] + - apiGroups: ['extensions'] + resources: ['ingresses'] + verbs: ['get', 'watch', 'list'] + - apiGroups: [''] + resources: ['nodes'] + verbs: ['list'] diff --git a/kustomize/external-dns-clusterrolebinding.yaml b/kustomize/external-dns-clusterrolebinding.yaml index 6630f84a2..3d08f8059 100644 --- a/kustomize/external-dns-clusterrolebinding.yaml +++ b/kustomize/external-dns-clusterrolebinding.yaml @@ -7,6 +7,6 @@ roleRef: kind: ClusterRole name: external-dns subjects: -- kind: ServiceAccount - name: external-dns - namespace: default + - kind: ServiceAccount + name: external-dns + namespace: default From 9b8a723d0f64a762ef6b1e35eaa28a0fec0183a6 Mon Sep 17 00:00:00 2001 From: Michael Goodness Date: Wed, 9 Sep 2020 12:30:27 -0500 Subject: [PATCH 37/46] chore(kustomize): move image tag to kustomization.yaml Signed-off-by: Michael Goodness --- kustomize/external-dns-deployment.yaml | 12 ++++++------ kustomize/kustomization.yaml | 15 +++++++++++---- 2 files changed, 17 insertions(+), 10 deletions(-) diff --git a/kustomize/external-dns-deployment.yaml b/kustomize/external-dns-deployment.yaml index d98d29f62..212e16614 100644 --- a/kustomize/external-dns-deployment.yaml +++ b/kustomize/external-dns-deployment.yaml @@ -15,9 +15,9 @@ spec: spec: serviceAccountName: external-dns containers: - - name: external-dns - image: 
k8s.gcr.io/external-dns/external-dns:v0.7.3 - args: - - --source=service - - --source=ingress - - --registry=txt + - name: external-dns + image: k8s.gcr.io/external-dns/external-dns + args: + - --source=service + - --source=ingress + - --registry=txt diff --git a/kustomize/kustomization.yaml b/kustomize/kustomization.yaml index 46ba1c12a..52cf9b8f5 100644 --- a/kustomize/kustomization.yaml +++ b/kustomize/kustomization.yaml @@ -1,5 +1,12 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +images: + - name: k8s.gcr.io/external-dns/external-dns + newTag: v0.7.3 + resources: -- ./external-dns-deployment.yaml -- ./external-dns-serviceaccount.yaml -- ./external-dns-clusterrole.yaml -- ./external-dns-clusterrolebinding.yaml + - ./external-dns-deployment.yaml + - ./external-dns-serviceaccount.yaml + - ./external-dns-clusterrole.yaml + - ./external-dns-clusterrolebinding.yaml From 3a1c1b65510b2e137b8a7d47b007cb96a87dc461 Mon Sep 17 00:00:00 2001 From: Vladimir Smagin <21h@blindage.org> Date: Fri, 11 Sep 2020 02:26:22 +0700 Subject: [PATCH 38/46] fix hetzner records update --- provider/hetzner/hetzner.go | 53 +++++++++++++++++++++++++++++--- provider/hetzner/hetzner_test.go | 6 ++++ 2 files changed, 55 insertions(+), 4 deletions(-) diff --git a/provider/hetzner/hetzner.go b/provider/hetzner/hetzner.go index 8e3d29c1d..c26a32332 100644 --- a/provider/hetzner/hetzner.go +++ b/provider/hetzner/hetzner.go @@ -117,14 +117,40 @@ func (p *HetznerProvider) submitChanges(ctx context.Context, changes []*HetznerC for _, changes := range zoneChanges { for _, change := range changes { + // Prepare record name + recordName := strings.TrimSuffix(change.ResourceRecordSet.Name, "."+change.ZoneName) + if recordName == change.ZoneName { + recordName = "@" + } + if change.ResourceRecordSet.RecordType == hclouddns.CNAME && !strings.HasSuffix(change.ResourceRecordSet.Value, ".") { + change.ResourceRecordSet.Value += "." 
+ } + change.ResourceRecordSet.Name = recordName + + // Get ID of record if not create operation + if change.Action != hetznerCreate { + allRecords, err := p.Client.GetRecords(hclouddns.HCloudGetRecordsParams{ZoneID: change.ZoneID}) + if err != nil { + return err + } + for _, record := range allRecords.Records { + if record.Name == change.ResourceRecordSet.Name && record.RecordType == change.ResourceRecordSet.RecordType { + change.ResourceRecordSet.ID = record.ID + break + } + } + } + log.WithFields(log.Fields{ + "id": change.ResourceRecordSet.ID, "record": change.ResourceRecordSet.Name, "type": change.ResourceRecordSet.RecordType, + "value": change.ResourceRecordSet.Value, "ttl": change.ResourceRecordSet.TTL, "action": change.Action, "zone": change.ZoneName, "zone_id": change.ZoneID, - }).Info("Changing record.") + }).Info("Changing record") change.ResourceRecordSet.Name = strings.TrimSuffix(change.ResourceRecordSet.Name, "."+change.ZoneName) if change.ResourceRecordSet.Name == change.ZoneName { @@ -143,13 +169,24 @@ func (p *HetznerProvider) submitChanges(ctx context.Context, changes []*HetznerC Value: change.ResourceRecordSet.Value, TTL: change.ResourceRecordSet.TTL, } - _, err := p.Client.CreateRecord(record) + answer, err := p.Client.CreateRecord(record) if err != nil { + log.WithFields(log.Fields{ + "Code": answer.Error.Code, + "Message": answer.Error.Message, + "Record name": answer.Record.Name, + "Record type": answer.Record.RecordType, + "Record value": answer.Record.Value, + }).Warning("Create problem") return err } case hetznerDelete: - _, err := p.Client.DeleteRecord(change.ResourceRecordSet.ID) + answer, err := p.Client.DeleteRecord(change.ResourceRecordSet.ID) if err != nil { + log.WithFields(log.Fields{ + "Code": answer.Error.Code, + "Message": answer.Error.Message, + }).Warning("Delete problem") return err } case hetznerUpdate: @@ -159,9 +196,17 @@ func (p *HetznerProvider) submitChanges(ctx context.Context, changes []*HetznerC Name: change.ResourceRecordSet.Name, Value: change.ResourceRecordSet.Value, TTL: change.ResourceRecordSet.TTL, + ID: change.ResourceRecordSet.ID, } - _, err := p.Client.UpdateRecord(record) + answer, err := p.Client.UpdateRecord(record) if err != nil { + log.WithFields(log.Fields{ + "Code": answer.Error.Code, + "Message": answer.Error.Message, + "Record name": answer.Record.Name, + "Record type": answer.Record.RecordType, + "Record value": answer.Record.Value, + }).Warning("Update problem") return err } } diff --git a/provider/hetzner/hetzner_test.go b/provider/hetzner/hetzner_test.go index 2faa63816..a0db75492 100644 --- a/provider/hetzner/hetzner_test.go +++ b/provider/hetzner/hetzner_test.go @@ -36,6 +36,7 @@ type mockHCloudClientAdapter interface { ImportZoneString(zoneID string, zonePlainText string) (hclouddns.HCloudAnswerGetZone, error) ExportZoneToString(zoneID string) (hclouddns.HCloudAnswerGetZonePlainText, error) ValidateZoneString(zonePlainText string) (hclouddns.HCloudAnswerZoneValidate, error) + GetRecord(ID string) (hclouddns.HCloudAnswerGetRecord, error) GetRecords(params hclouddns.HCloudGetRecordsParams) (hclouddns.HCloudAnswerGetRecords, error) UpdateRecord(record hclouddns.HCloudRecord) (hclouddns.HCloudAnswerGetRecord, error) DeleteRecord(ID string) (hclouddns.HCloudAnswerDeleteRecord, error) @@ -95,6 +96,11 @@ func (m *mockHCloudClient) ValidateZoneString(zonePlainText string) (hclouddns.H } // records + +func (m *mockHCloudClient) GetRecord(ID string) (hclouddns.HCloudAnswerGetRecord, error) { + return 
hclouddns.HCloudAnswerGetRecord{}, nil +} + func (m *mockHCloudClient) GetRecords(params hclouddns.HCloudGetRecordsParams) (hclouddns.HCloudAnswerGetRecords, error) { return hclouddns.HCloudAnswerGetRecords{ Records: []hclouddns.HCloudRecord{ From ca5bd7ca4fb1dac17da5b61e004054cc8bbb79dc Mon Sep 17 00:00:00 2001 From: Vladimir Smagin <21h@blindage.org> Date: Fri, 11 Sep 2020 02:28:08 +0700 Subject: [PATCH 39/46] bump lib version --- go.mod | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go.mod b/go.mod index fd40c7ed5..afe4c84ae 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.14 require ( cloud.google.com/go v0.50.0 - git.blindage.org/21h/hcloud-dns v0.0.0-20200525170043-def10a4a28e0 + git.blindage.org/21h/hcloud-dns v0.0.0-20200807003420-f768ffe03f8d github.com/Azure/azure-sdk-for-go v45.1.0+incompatible github.com/Azure/go-autorest/autorest v0.11.4 github.com/Azure/go-autorest/autorest/adal v0.9.2 From d06dff145c67d5c79b4691038bc4e38ffd71c760 Mon Sep 17 00:00:00 2001 From: Sergei Zyubin Date: Mon, 16 Mar 2020 22:17:24 +0100 Subject: [PATCH 40/46] Add externalIPs for LoadBalancer type --- source/service.go | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/source/service.go b/source/service.go index 2c8fb0997..a6e3e3d30 100644 --- a/source/service.go +++ b/source/service.go @@ -486,7 +486,11 @@ func extractServiceExternalName(svc *v1.Service) endpoint.Targets { } func extractLoadBalancerTargets(svc *v1.Service) endpoint.Targets { - var targets endpoint.Targets + var ( + targets endpoint.Targets + externalIPs endpoint.Targets + ) + // Create a corresponding endpoint for each configured external entrypoint. for _, lb := range svc.Status.LoadBalancer.Ingress { @@ -498,6 +502,16 @@ func extractLoadBalancerTargets(svc *v1.Service) endpoint.Targets { } } + if svc.Spec.ExternalIPs != nil { + for _, ext := range svc.Spec.ExternalIPs { + externalIPs = append(externalIPs, ext) + } + } + + if len(externalIPs) > 0 { + return externalIPs + } + return targets } From cc6a1fb3e7722ac80b28f0ca00d8ba9ca7f1248d Mon Sep 17 00:00:00 2001 From: Sergei Zyubin Date: Tue, 17 Mar 2020 14:10:30 +0100 Subject: [PATCH 41/46] Add test for LoadBalancer and ExternalIPs --- source/service.go | 7 ++--- source/service_test.go | 71 ++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 71 insertions(+), 7 deletions(-) diff --git a/source/service.go b/source/service.go index a6e3e3d30..4a8e64948 100644 --- a/source/service.go +++ b/source/service.go @@ -491,7 +491,6 @@ func extractLoadBalancerTargets(svc *v1.Service) endpoint.Targets { externalIPs endpoint.Targets ) - // Create a corresponding endpoint for each configured external entrypoint. 
for _, lb := range svc.Status.LoadBalancer.Ingress { if lb.IP != "" { @@ -503,14 +502,14 @@ func extractLoadBalancerTargets(svc *v1.Service) endpoint.Targets { } if svc.Spec.ExternalIPs != nil { - for _, ext := range svc.Spec.ExternalIPs { - externalIPs = append(externalIPs, ext) + for _, ext := range svc.Spec.ExternalIPs { + externalIPs = append(externalIPs, ext) } } if len(externalIPs) > 0 { return externalIPs - } + } return targets } diff --git a/source/service_test.go b/source/service_test.go index 081ef2247..2cfa9566e 100644 --- a/source/service_test.go +++ b/source/service_test.go @@ -174,6 +174,7 @@ func testServiceSourceEndpoints(t *testing.T) { labels map[string]string annotations map[string]string clusterIP string + externalIPs []string lbs []string serviceTypesFilter []string expected []*endpoint.Endpoint @@ -193,13 +194,14 @@ func testServiceSourceEndpoints(t *testing.T) { map[string]string{}, map[string]string{}, "", + []string{}, []string{"1.2.3.4"}, []string{}, []*endpoint.Endpoint{}, false, }, { - "no annotated services return no endpoints when ignoreing annotations", + "no annotated services return no endpoints when ignoring annotations", "", "", "testing", @@ -212,6 +214,7 @@ func testServiceSourceEndpoints(t *testing.T) { map[string]string{}, map[string]string{}, "", + []string{}, []string{"1.2.3.4"}, []string{}, []*endpoint.Endpoint{}, @@ -233,6 +236,7 @@ func testServiceSourceEndpoints(t *testing.T) { hostnameAnnotationKey: "foo.example.org.", }, "", + []string{}, []string{"1.2.3.4"}, []string{}, []*endpoint.Endpoint{ @@ -256,6 +260,7 @@ func testServiceSourceEndpoints(t *testing.T) { hostnameAnnotationKey: "foo.example.org.", }, "", + []string{}, []string{"1.2.3.4"}, []string{}, []*endpoint.Endpoint{}, @@ -279,6 +284,7 @@ func testServiceSourceEndpoints(t *testing.T) { "1.2.3.4", []string{}, []string{}, + []string{}, []*endpoint.Endpoint{}, false, }, @@ -296,6 +302,7 @@ func testServiceSourceEndpoints(t *testing.T) { map[string]string{}, map[string]string{}, "", + []string{}, []string{"1.2.3.4"}, []string{}, []*endpoint.Endpoint{ @@ -318,6 +325,7 @@ func testServiceSourceEndpoints(t *testing.T) { map[string]string{}, map[string]string{}, "", + []string{}, []string{"1.2.3.4"}, []string{}, []*endpoint.Endpoint{ @@ -342,6 +350,7 @@ func testServiceSourceEndpoints(t *testing.T) { hostnameAnnotationKey: "foo.example.org., bar.example.org.", }, "", + []string{}, []string{"1.2.3.4"}, []string{}, []*endpoint.Endpoint{ @@ -368,6 +377,7 @@ func testServiceSourceEndpoints(t *testing.T) { hostnameAnnotationKey: "foo.example.org., bar.example.org.", }, "", + []string{}, []string{"1.2.3.4"}, []string{}, []*endpoint.Endpoint{ @@ -392,6 +402,7 @@ func testServiceSourceEndpoints(t *testing.T) { hostnameAnnotationKey: "foo.example.org., bar.example.org.", }, "", + []string{}, []string{"1.2.3.4"}, []string{}, []*endpoint.Endpoint{ @@ -416,6 +427,7 @@ func testServiceSourceEndpoints(t *testing.T) { hostnameAnnotationKey: "foo.example.org, bar.example.org", }, "", + []string{}, []string{"1.2.3.4"}, []string{}, []*endpoint.Endpoint{ @@ -440,6 +452,7 @@ func testServiceSourceEndpoints(t *testing.T) { hostnameAnnotationKey: "foo.example.org.", }, "", + []string{}, []string{"lb.example.com"}, // Kubernetes omits the trailing dot []string{}, []*endpoint.Endpoint{ @@ -463,6 +476,7 @@ func testServiceSourceEndpoints(t *testing.T) { hostnameAnnotationKey: "foo.example.org", // Trailing dot is omitted }, "", + []string{}, []string{"1.2.3.4", "lb.example.com"}, // Kubernetes omits the trailing dot 
[]string{}, []*endpoint.Endpoint{ @@ -488,6 +502,7 @@ func testServiceSourceEndpoints(t *testing.T) { hostnameAnnotationKey: "foo.example.org.", }, "", + []string{}, []string{"1.2.3.4"}, []string{}, []*endpoint.Endpoint{ @@ -512,6 +527,7 @@ func testServiceSourceEndpoints(t *testing.T) { hostnameAnnotationKey: "foo.example.org.", }, "", + []string{}, []string{"1.2.3.4"}, []string{}, []*endpoint.Endpoint{}, @@ -533,6 +549,7 @@ func testServiceSourceEndpoints(t *testing.T) { hostnameAnnotationKey: "foo.example.org.", }, "", + []string{}, []string{"1.2.3.4"}, []string{}, []*endpoint.Endpoint{ @@ -556,6 +573,7 @@ func testServiceSourceEndpoints(t *testing.T) { hostnameAnnotationKey: "foo.example.org.", }, "", + []string{}, []string{"1.2.3.4"}, []string{}, []*endpoint.Endpoint{}, @@ -577,6 +595,7 @@ func testServiceSourceEndpoints(t *testing.T) { hostnameAnnotationKey: "foo.example.org.", }, "", + []string{}, []string{"1.2.3.4"}, []string{}, []*endpoint.Endpoint{ @@ -601,6 +620,7 @@ func testServiceSourceEndpoints(t *testing.T) { "service.beta.kubernetes.io/external-traffic": "OnlyLocal", }, "", + []string{}, []string{"1.2.3.4"}, []string{}, []*endpoint.Endpoint{ @@ -625,6 +645,7 @@ func testServiceSourceEndpoints(t *testing.T) { "service.beta.kubernetes.io/external-traffic": "SomethingElse", }, "", + []string{}, []string{"1.2.3.4"}, []string{}, []*endpoint.Endpoint{}, @@ -647,6 +668,7 @@ func testServiceSourceEndpoints(t *testing.T) { "service.beta.kubernetes.io/external-traffic": "OnlyLocal", }, "", + []string{}, []string{"1.2.3.4"}, []string{}, []*endpoint.Endpoint{}, @@ -669,6 +691,7 @@ func testServiceSourceEndpoints(t *testing.T) { "service.beta.kubernetes.io/external-traffic": "Global", }, "", + []string{}, []string{"1.2.3.4"}, []string{}, []*endpoint.Endpoint{ @@ -693,6 +716,7 @@ func testServiceSourceEndpoints(t *testing.T) { "service.beta.kubernetes.io/external-traffic": "OnlyLocal", }, "", + []string{}, []string{"1.2.3.4"}, []string{}, []*endpoint.Endpoint{}, @@ -716,9 +740,34 @@ func testServiceSourceEndpoints(t *testing.T) { "", []string{}, []string{}, + []string{}, []*endpoint.Endpoint{}, false, }, + { + "annotated service with externalIPs returns a single endpoint with multiple targets", + "", + "", + "testing", + "foo", + v1.ServiceTypeLoadBalancer, + "", + "", + false, + false, + map[string]string{}, + map[string]string{ + hostnameAnnotationKey: "foo.example.org.", + }, + "", + []string{"10.2.3.4", "11.2.3.4"}, + []string{"1.2.3.4"}, + []string{}, + []*endpoint.Endpoint{ + {DNSName: "foo.example.org", Targets: endpoint.Targets{"10.2.3.4", "11.2.3.4"}}, + }, + false, + }, { "multiple external entrypoints return a single endpoint with multiple targets", "", @@ -735,6 +784,7 @@ func testServiceSourceEndpoints(t *testing.T) { hostnameAnnotationKey: "foo.example.org.", }, "", + []string{}, []string{"1.2.3.4", "8.8.8.8"}, []string{}, []*endpoint.Endpoint{ @@ -758,6 +808,7 @@ func testServiceSourceEndpoints(t *testing.T) { "zalando.org/dnsname": "foo.example.org.", }, "", + []string{}, []string{"1.2.3.4"}, []string{}, []*endpoint.Endpoint{}, @@ -779,6 +830,7 @@ func testServiceSourceEndpoints(t *testing.T) { "zalando.org/dnsname": "foo.example.org.", }, "", + []string{}, []string{"1.2.3.4"}, []string{}, []*endpoint.Endpoint{ @@ -804,6 +856,7 @@ func testServiceSourceEndpoints(t *testing.T) { "domainName": "foo.example.org., bar.example.org", }, "", + []string{}, []string{"1.2.3.4"}, []string{}, []*endpoint.Endpoint{ @@ -826,6 +879,7 @@ func testServiceSourceEndpoints(t *testing.T) { 
map[string]string{}, map[string]string{}, "", + []string{}, []string{"1.2.3.4", "elb.com"}, []string{}, []*endpoint.Endpoint{ @@ -850,6 +904,7 @@ func testServiceSourceEndpoints(t *testing.T) { hostnameAnnotationKey: "foo.example.org.", }, "", + []string{}, []string{"1.2.3.4", "elb.com"}, []string{}, []*endpoint.Endpoint{ @@ -874,6 +929,7 @@ func testServiceSourceEndpoints(t *testing.T) { "zalando.org/dnsname": "mate.example.org.", }, "", + []string{}, []string{"1.2.3.4"}, []string{}, []*endpoint.Endpoint{ @@ -895,6 +951,7 @@ func testServiceSourceEndpoints(t *testing.T) { map[string]string{}, map[string]string{}, "", + []string{}, []string{"1.2.3.4"}, []string{}, []*endpoint.Endpoint{}, @@ -916,6 +973,7 @@ func testServiceSourceEndpoints(t *testing.T) { hostnameAnnotationKey: "foo.example.org.", }, "", + []string{}, []string{"1.2.3.4"}, []string{}, []*endpoint.Endpoint{ @@ -940,6 +998,7 @@ func testServiceSourceEndpoints(t *testing.T) { ttlAnnotationKey: "foo", }, "", + []string{}, []string{"1.2.3.4"}, []string{}, []*endpoint.Endpoint{ @@ -964,6 +1023,7 @@ func testServiceSourceEndpoints(t *testing.T) { ttlAnnotationKey: "10", }, "", + []string{}, []string{"1.2.3.4"}, []string{}, []*endpoint.Endpoint{ @@ -988,6 +1048,7 @@ func testServiceSourceEndpoints(t *testing.T) { ttlAnnotationKey: "1m", }, "", + []string{}, []string{"1.2.3.4"}, []string{}, []*endpoint.Endpoint{ @@ -1012,6 +1073,7 @@ func testServiceSourceEndpoints(t *testing.T) { ttlAnnotationKey: "-10", }, "", + []string{}, []string{"1.2.3.4"}, []string{}, []*endpoint.Endpoint{ @@ -1035,6 +1097,7 @@ func testServiceSourceEndpoints(t *testing.T) { hostnameAnnotationKey: "foo.example.org.", }, "", + []string{}, []string{"1.2.3.4"}, []string{string(v1.ServiceTypeLoadBalancer)}, []*endpoint.Endpoint{ @@ -1058,6 +1121,7 @@ func testServiceSourceEndpoints(t *testing.T) { hostnameAnnotationKey: "foo.example.org.", }, "", + []string{}, []string{"1.2.3.4"}, []string{string(v1.ServiceTypeLoadBalancer)}, []*endpoint.Endpoint{}, @@ -1080,8 +1144,9 @@ func testServiceSourceEndpoints(t *testing.T) { service := &v1.Service{ Spec: v1.ServiceSpec{ - Type: tc.svcType, - ClusterIP: tc.clusterIP, + Type: tc.svcType, + ClusterIP: tc.clusterIP, + ExternalIPs: tc.externalIPs, }, ObjectMeta: metav1.ObjectMeta{ Namespace: tc.svcNamespace, From 323da6bba8cb0d638c02e8da2cde411cd61aae7e Mon Sep 17 00:00:00 2001 From: Sergei Zyubin Date: Sat, 12 Sep 2020 12:39:05 +0200 Subject: [PATCH 42/46] Add notes to README.md --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index 853f86c60..639bfbd34 100644 --- a/README.md +++ b/README.md @@ -216,6 +216,8 @@ The [tutorials](docs/tutorials) section contains examples, including Ingress res If using a txt registry and attempting to use a CNAME the `--txt-prefix` must be set to avoid conflicts. Changing `--txt-prefix` will result in lost ownership over previously created records. +If `externalIPs` list is defined for a `LoadBalancer` service, this list will be used instead of an assigned load balancer IP to create a DNS record. It's useful when you run bare metal Kubernetes clusters behind NAT or in a similar setup, where a load balancer IP differs from a public IP (e.g. with [MetalLB](https://metallb.universe.tf)). + # Roadmap ExternalDNS was built with extensibility in mind. Adding and experimenting with new DNS providers and sources of desired DNS records should be as easy as possible. It should also be possible to modify how ExternalDNS behaves—e.g. 
whether it should add records but never delete them.

From b75151e3e54fca20c52287a62b28b8c85663d9dd Mon Sep 17 00:00:00 2001
From: Evan Baker
Date: Wed, 4 Dec 2019 18:48:43 -0500
Subject: [PATCH 43/46] add service annotation to set public/private iface for NodePort

Signed-off-by: Evan Baker
---
 source/service.go | 8 +++++++-
 source/source.go  | 7 +++++++
 2 files changed, 14 insertions(+), 1 deletion(-)

diff --git a/source/service.go b/source/service.go
index 2c8fb0997..657f85389 100644
--- a/source/service.go
+++ b/source/service.go
@@ -556,10 +556,16 @@ func (sc *serviceSource) extractNodePortTargets(svc *v1.Service) (endpoint.Targe
 		}
 	}
 
+	access := getAccessFromAnnotations(svc.Annotations)
+	if access == "public" {
+		return externalIPs, nil
+	}
+	if access == "private" {
+		return internalIPs, nil
+	}
 	if len(externalIPs) > 0 {
 		return externalIPs, nil
 	}
-
 	return internalIPs, nil
 }

diff --git a/source/source.go b/source/source.go
index 6c99f3db7..ae00047d7 100644
--- a/source/source.go
+++ b/source/source.go
@@ -38,6 +38,8 @@ const (
 	controllerAnnotationKey = "external-dns.alpha.kubernetes.io/controller"
 	// The annotation used for defining the desired hostname
 	hostnameAnnotationKey = "external-dns.alpha.kubernetes.io/hostname"
+	// The annotation used for specifying whether the public or private interface address is used
+	accessAnnotationKey = "external-dns.alpha.kubernetes.io/access"
 	// The annotation used for defining the desired ingress target
 	targetAnnotationKey = "external-dns.alpha.kubernetes.io/target"
 	// The annotation used for defining the desired DNS record TTL
@@ -107,6 +109,11 @@ func getHostnamesFromAnnotations(annotations map[string]string) []string {
 	return strings.Split(strings.Replace(hostnameAnnotation, " ", "", -1), ",")
 }
 
+func getAccessFromAnnotations(annotations map[string]string) string {
+	accessAnnotation := annotations[accessAnnotationKey]
+	return accessAnnotation
+}
+
 func getAliasFromAnnotations(annotations map[string]string) bool {
 	aliasAnnotation, exists := annotations[aliasAnnotationKey]
 	return exists && aliasAnnotation == "true"

From 54320a16ab7d9905176092ce098c996ed4a1e82e Mon Sep 17 00:00:00 2001
From: rbtr
Date: Mon, 24 Aug 2020 15:41:58 -0400
Subject: [PATCH 44/46] add doc and test

---
 docs/faq.md            | 10 +++++
 source/service_test.go | 94 ++++++++++++++++++++++++++++++++++++++++++
 source/source.go       |  3 +-
 3 files changed, 105 insertions(+), 2 deletions(-)

diff --git a/docs/faq.md b/docs/faq.md
index 1ae350065..46663f4d2 100644
--- a/docs/faq.md
+++ b/docs/faq.md
@@ -275,6 +275,16 @@ and one with `--annotation-filter=kubernetes.io/ingress.class=nginx-external`.
 Beware when using multiple sources, e.g. `--source=service --source=ingress`, `--annotation-filter` will filter every given source objects.
 If you need to filter only one specific source you have to run a separated external dns service containing only the wanted `--source` and `--annotation-filter`.
 
+### How do I specify that I want the DNS record to point to either the Node's public or private IP when it has both?
+
+If your Nodes have both public and private IP addresses, you might want to write DNS records with one or the other.
+For example, you may want to write a DNS record in a private zone that resolves to your Nodes' private IPs so that traffic never leaves your private network.
+ +To accomplish this, set this annotation on your service: `external-dns.alpha.kubernetes.io/access=private` +Conversely, to force the public IP: `external-dns.alpha.kubernetes.io/access=public` + +If this annotation is not set, and the node has both public and private IP addresses, then the public IP will be used by default. + ### Can external-dns manage(add/remove) records in a hosted zone which is setup in different AWS account? Yes, give it the correct cross-account/assume-role permissions and use the `--aws-assume-role` flag https://github.com/kubernetes-sigs/external-dns/pull/524#issue-181256561 diff --git a/source/service_test.go b/source/service_test.go index 081ef2247..37768d5f2 100644 --- a/source/service_test.go +++ b/source/service_test.go @@ -1591,6 +1591,100 @@ func TestNodePortServices(t *testing.T) { []int{1, 1}, []v1.PodPhase{v1.PodRunning, v1.PodRunning}, }, + { + "access=private annotation NodePort services return an endpoint with private IP addresses of the cluster's nodes", + "", + "", + "testing", + "foo", + v1.ServiceTypeNodePort, + v1.ServiceExternalTrafficPolicyTypeCluster, + "", + "", + false, + map[string]string{}, + map[string]string{ + hostnameAnnotationKey: "foo.example.org.", + accessAnnotationKey: "private", + }, + nil, + []*endpoint.Endpoint{ + {DNSName: "_30192._tcp.foo.example.org", Targets: endpoint.Targets{"0 50 30192 foo.example.org"}, RecordType: endpoint.RecordTypeSRV}, + {DNSName: "foo.example.org", Targets: endpoint.Targets{"10.0.1.1", "10.0.1.2"}, RecordType: endpoint.RecordTypeA}, + }, + false, + []*v1.Node{{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node1", + }, + Status: v1.NodeStatus{ + Addresses: []v1.NodeAddress{ + {Type: v1.NodeExternalIP, Address: "54.10.11.1"}, + {Type: v1.NodeInternalIP, Address: "10.0.1.1"}, + }, + }, + }, { + ObjectMeta: metav1.ObjectMeta{ + Name: "node2", + }, + Status: v1.NodeStatus{ + Addresses: []v1.NodeAddress{ + {Type: v1.NodeExternalIP, Address: "54.10.11.2"}, + {Type: v1.NodeInternalIP, Address: "10.0.1.2"}, + }, + }, + }}, + []string{}, + []int{}, + []v1.PodPhase{}, + }, + { + "access=public annotation NodePort services return an endpoint with public IP addresses of the cluster's nodes", + "", + "", + "testing", + "foo", + v1.ServiceTypeNodePort, + v1.ServiceExternalTrafficPolicyTypeCluster, + "", + "", + false, + map[string]string{}, + map[string]string{ + hostnameAnnotationKey: "foo.example.org.", + accessAnnotationKey: "public", + }, + nil, + []*endpoint.Endpoint{ + {DNSName: "_30192._tcp.foo.example.org", Targets: endpoint.Targets{"0 50 30192 foo.example.org"}, RecordType: endpoint.RecordTypeSRV}, + {DNSName: "foo.example.org", Targets: endpoint.Targets{"54.10.11.1", "54.10.11.2"}, RecordType: endpoint.RecordTypeA}, + }, + false, + []*v1.Node{{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node1", + }, + Status: v1.NodeStatus{ + Addresses: []v1.NodeAddress{ + {Type: v1.NodeExternalIP, Address: "54.10.11.1"}, + {Type: v1.NodeInternalIP, Address: "10.0.1.1"}, + }, + }, + }, { + ObjectMeta: metav1.ObjectMeta{ + Name: "node2", + }, + Status: v1.NodeStatus{ + Addresses: []v1.NodeAddress{ + {Type: v1.NodeExternalIP, Address: "54.10.11.2"}, + {Type: v1.NodeInternalIP, Address: "10.0.1.2"}, + }, + }, + }}, + []string{}, + []int{}, + []v1.PodPhase{}, + }, } { t.Run(tc.title, func(t *testing.T) { // Create a Kubernetes testing client diff --git a/source/source.go b/source/source.go index ae00047d7..a9acd52d6 100644 --- a/source/source.go +++ b/source/source.go @@ -110,8 +110,7 @@ func 
getHostnamesFromAnnotations(annotations map[string]string) []string { } func getAccessFromAnnotations(annotations map[string]string) string { - accessAnnotation := annotations[accessAnnotationKey] - return accessAnnotation + return annotations[accessAnnotationKey] } func getAliasFromAnnotations(annotations map[string]string) bool { From fefb1b27777a58f2444c6ff4361a83909bde1dae Mon Sep 17 00:00:00 2001 From: Benjile Date: Fri, 18 Sep 2020 09:12:14 +0200 Subject: [PATCH 45/46] Optionally ignore tls rules in ingress source --- main.go | 1 + pkg/apis/externaldns/types.go | 3 +++ pkg/apis/externaldns/types_test.go | 3 +++ source/ingress.go | 21 ++++++++++------ source/ingress_test.go | 39 +++++++++++++++++++++++++++++- source/store.go | 3 ++- 6 files changed, 60 insertions(+), 10 deletions(-) diff --git a/main.go b/main.go index 1a02976a2..0c61c7301 100644 --- a/main.go +++ b/main.go @@ -103,6 +103,7 @@ func main() { FQDNTemplate: cfg.FQDNTemplate, CombineFQDNAndAnnotation: cfg.CombineFQDNAndAnnotation, IgnoreHostnameAnnotation: cfg.IgnoreHostnameAnnotation, + IgnoreIngressTLSSpec: cfg.IgnoreIngressTLSSpec, Compatibility: cfg.Compatibility, PublishInternal: cfg.PublishInternal, PublishHostIP: cfg.PublishHostIP, diff --git a/pkg/apis/externaldns/types.go b/pkg/apis/externaldns/types.go index 204996883..000ea9a7c 100644 --- a/pkg/apis/externaldns/types.go +++ b/pkg/apis/externaldns/types.go @@ -50,6 +50,7 @@ type Config struct { FQDNTemplate string CombineFQDNAndAnnotation bool IgnoreHostnameAnnotation bool + IgnoreIngressTLSSpec bool Compatibility string PublishInternal bool PublishHostIP bool @@ -158,6 +159,7 @@ var defaultConfig = &Config{ FQDNTemplate: "", CombineFQDNAndAnnotation: false, IgnoreHostnameAnnotation: false, + IgnoreIngressTLSSpec: false, Compatibility: "", PublishInternal: false, PublishHostIP: false, @@ -310,6 +312,7 @@ func (cfg *Config) ParseFlags(args []string) error { app.Flag("fqdn-template", "A templated string that's used to generate DNS names from sources that don't define a hostname themselves, or to add a hostname suffix when paired with the fake source (optional). 
Accepts comma separated list for multiple global FQDN.").Default(defaultConfig.FQDNTemplate).StringVar(&cfg.FQDNTemplate) app.Flag("combine-fqdn-annotation", "Combine FQDN template and Annotations instead of overwriting").BoolVar(&cfg.CombineFQDNAndAnnotation) app.Flag("ignore-hostname-annotation", "Ignore hostname annotation when generating DNS names, valid only when using fqdn-template is set (optional, default: false)").BoolVar(&cfg.IgnoreHostnameAnnotation) + app.Flag("ignore-ingress-tls-spec", "Ignore tls spec section in ingresses resources, applicable only for ingress sources (optional, default: false)").BoolVar(&cfg.IgnoreIngressTLSSpec) app.Flag("compatibility", "Process annotation semantics from legacy implementations (optional, options: mate, molecule)").Default(defaultConfig.Compatibility).EnumVar(&cfg.Compatibility, "", "mate", "molecule") app.Flag("publish-internal-services", "Allow external-dns to publish DNS records for ClusterIP services (optional)").BoolVar(&cfg.PublishInternal) app.Flag("publish-host-ip", "Allow external-dns to publish host-ip for headless services (optional)").BoolVar(&cfg.PublishHostIP) diff --git a/pkg/apis/externaldns/types_test.go b/pkg/apis/externaldns/types_test.go index 14c6a1e07..60d5afdfb 100644 --- a/pkg/apis/externaldns/types_test.go +++ b/pkg/apis/externaldns/types_test.go @@ -112,6 +112,7 @@ var ( Sources: []string{"service", "ingress", "connector"}, Namespace: "namespace", IgnoreHostnameAnnotation: true, + IgnoreIngressTLSSpec: true, FQDNTemplate: "{{.Name}}.service.example.com", Compatibility: "mate", Provider: "google", @@ -216,6 +217,7 @@ func TestParseFlags(t *testing.T) { "--namespace=namespace", "--fqdn-template={{.Name}}.service.example.com", "--ignore-hostname-annotation", + "--ignore-ingress-tls-spec", "--compatibility=mate", "--provider=google", "--google-project=project", @@ -306,6 +308,7 @@ func TestParseFlags(t *testing.T) { "EXTERNAL_DNS_NAMESPACE": "namespace", "EXTERNAL_DNS_FQDN_TEMPLATE": "{{.Name}}.service.example.com", "EXTERNAL_DNS_IGNORE_HOSTNAME_ANNOTATION": "1", + "EXTERNAL_DNS_IGNORE_INGRESS_TLS_SPEC": "1", "EXTERNAL_DNS_COMPATIBILITY": "mate", "EXTERNAL_DNS_PROVIDER": "google", "EXTERNAL_DNS_GOOGLE_PROJECT": "project", diff --git a/source/ingress.go b/source/ingress.go index 89775f90a..0ad7f94b9 100644 --- a/source/ingress.go +++ b/source/ingress.go @@ -56,10 +56,11 @@ type ingressSource struct { combineFQDNAnnotation bool ignoreHostnameAnnotation bool ingressInformer extinformers.IngressInformer + ignoreIngressTLSSpec bool } // NewIngressSource creates a new ingressSource with the given config. 
-func NewIngressSource(kubeClient kubernetes.Interface, namespace, annotationFilter string, fqdnTemplate string, combineFqdnAnnotation bool, ignoreHostnameAnnotation bool) (Source, error) {
+func NewIngressSource(kubeClient kubernetes.Interface, namespace, annotationFilter string, fqdnTemplate string, combineFqdnAnnotation bool, ignoreHostnameAnnotation bool, ignoreIngressTLSSpec bool) (Source, error) {
 	var (
 		tmpl *template.Template
 		err  error
@@ -105,6 +106,7 @@ func NewIngressSource(kubeClient kubernetes.Interface, namespace, annotationFilt
 		combineFQDNAnnotation:    combineFqdnAnnotation,
 		ignoreHostnameAnnotation: ignoreHostnameAnnotation,
 		ingressInformer:          ingressInformer,
+		ignoreIngressTLSSpec:     ignoreIngressTLSSpec,
 	}
 	return sc, nil
 }
@@ -132,7 +134,7 @@ func (sc *ingressSource) Endpoints(ctx context.Context) ([]*endpoint.Endpoint, e
 			continue
 		}
 
-		ingEndpoints := endpointsFromIngress(ing, sc.ignoreHostnameAnnotation)
+		ingEndpoints := endpointsFromIngress(ing, sc.ignoreHostnameAnnotation, sc.ignoreIngressTLSSpec)
 
 		// apply template if host is missing on ingress
 		if (sc.combineFQDNAnnotation || len(ingEndpoints) == 0) && sc.fqdnTemplate != nil {
@@ -240,7 +242,7 @@ func (sc *ingressSource) setDualstackLabel(ingress *v1beta1.Ingress, endpoints [
 }
 
 // endpointsFromIngress extracts the endpoints from ingress object
-func endpointsFromIngress(ing *v1beta1.Ingress, ignoreHostnameAnnotation bool) []*endpoint.Endpoint {
+func endpointsFromIngress(ing *v1beta1.Ingress, ignoreHostnameAnnotation bool, ignoreIngressTLSSpec bool) []*endpoint.Endpoint {
 	var endpoints []*endpoint.Endpoint
 
 	ttl, err := getTTLFromAnnotations(ing.Annotations)
@@ -263,12 +265,15 @@ func endpointsFromIngress(ing *v1beta1.Ingress, ignoreHostnameAnnotation bool) [
 		endpoints = append(endpoints, endpointsForHostname(rule.Host, targets, ttl, providerSpecific, setIdentifier)...)
 	}
 
-	for _, tls := range ing.Spec.TLS {
-		for _, host := range tls.Hosts {
-			if host == "" {
-				continue
+	// Skip endpoints if we do not want entries from tls spec section
+	if !ignoreIngressTLSSpec {
+		for _, tls := range ing.Spec.TLS {
+			for _, host := range tls.Hosts {
+				if host == "" {
+					continue
+				}
+				endpoints = append(endpoints, endpointsForHostname(host, targets, ttl, providerSpecific, setIdentifier)...)
 			}
-			endpoints = append(endpoints, endpointsForHostname(host, targets, ttl, providerSpecific, setIdentifier)...)
} } diff --git a/source/ingress_test.go b/source/ingress_test.go index f30060250..634812603 100644 --- a/source/ingress_test.go +++ b/source/ingress_test.go @@ -52,6 +52,7 @@ func (suite *IngressSuite) SetupTest() { "{{.Name}}", false, false, + false, ) suite.NoError(err, "should initialize ingress source") @@ -134,6 +135,7 @@ func TestNewIngressSource(t *testing.T) { ti.fqdnTemplate, ti.combineFQDNAndAnnotation, false, + false, ) if ti.expectError { assert.Error(t, err) @@ -221,7 +223,7 @@ func testEndpointsFromIngress(t *testing.T) { } { t.Run(ti.title, func(t *testing.T) { realIngress := ti.ingress.Ingress() - validateEndpoints(t, endpointsFromIngress(realIngress, false), ti.expected) + validateEndpoints(t, endpointsFromIngress(realIngress, false, false), ti.expected) }) } } @@ -238,6 +240,7 @@ func testIngressEndpoints(t *testing.T) { fqdnTemplate string combineFQDNAndAnnotation bool ignoreHostnameAnnotation bool + ignoreIngressTLSSpec bool }{ { title: "no ingress", @@ -993,6 +996,39 @@ func testIngressEndpoints(t *testing.T) { }, }, }, + { + title: "ignore tls section", + targetNamespace: "", + ignoreIngressTLSSpec: true, + ingressItems: []fakeIngress{ + { + name: "fake1", + namespace: namespace, + tlsdnsnames: [][]string{{"example.org"}}, + ips: []string{"1.2.3.4"}, + }, + }, + expected: []*endpoint.Endpoint{}, + }, + { + title: "reading tls section", + targetNamespace: "", + ignoreIngressTLSSpec: false, + ingressItems: []fakeIngress{ + { + name: "fake1", + namespace: namespace, + tlsdnsnames: [][]string{{"example.org"}}, + ips: []string{"1.2.3.4"}, + }, + }, + expected: []*endpoint.Endpoint{ + { + DNSName: "example.org", + Targets: endpoint.Targets{"1.2.3.4"}, + }, + }, + }, } { t.Run(ti.title, func(t *testing.T) { ingresses := make([]*v1beta1.Ingress, 0) @@ -1008,6 +1044,7 @@ func testIngressEndpoints(t *testing.T) { ti.fqdnTemplate, ti.combineFQDNAndAnnotation, ti.ignoreHostnameAnnotation, + ti.ignoreIngressTLSSpec, ) for _, ingress := range ingresses { _, err := fakeClient.ExtensionsV1beta1().Ingresses(ingress.Namespace).Create(context.Background(), ingress, metav1.CreateOptions{}) diff --git a/source/store.go b/source/store.go index c6cd064a2..a06571f82 100644 --- a/source/store.go +++ b/source/store.go @@ -45,6 +45,7 @@ type Config struct { FQDNTemplate string CombineFQDNAndAnnotation bool IgnoreHostnameAnnotation bool + IgnoreIngressTLSSpec bool Compatibility string PublishInternal bool PublishHostIP bool @@ -184,7 +185,7 @@ func BuildWithConfig(source string, p ClientGenerator, cfg *Config) (Source, err if err != nil { return nil, err } - return NewIngressSource(client, cfg.Namespace, cfg.AnnotationFilter, cfg.FQDNTemplate, cfg.CombineFQDNAndAnnotation, cfg.IgnoreHostnameAnnotation) + return NewIngressSource(client, cfg.Namespace, cfg.AnnotationFilter, cfg.FQDNTemplate, cfg.CombineFQDNAndAnnotation, cfg.IgnoreHostnameAnnotation, cfg.IgnoreIngressTLSSpec) case "istio-gateway": kubernetesClient, err := p.KubeClient() if err != nil { From 79ea64884bbfc89f9c1f444c9dddd5ce361f31ad Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Mar=C3=A7al?= Date: Thu, 24 Sep 2020 10:28:05 +0200 Subject: [PATCH 46/46] Added labelFilter for source CRD (#1461) * Added label filter for source CRD * Fixed bug with labels and added tests for source CRD * Fixed formating * Update source/crd_test.go Co-authored-by: Vinny Sabatini Co-authored-by: Vinny Sabatini --- main.go | 1 + pkg/apis/externaldns/types.go | 3 +++ source/crd.go | 16 ++++++++++-- source/crd_test.go | 49 
++++++++++++++++++++++++++++++++--- source/store.go | 3 ++- 5 files changed, 66 insertions(+), 6 deletions(-) diff --git a/main.go b/main.go index 3e2f81a1f..867de5da9 100644 --- a/main.go +++ b/main.go @@ -100,6 +100,7 @@ func main() { sourceCfg := &source.Config{ Namespace: cfg.Namespace, AnnotationFilter: cfg.AnnotationFilter, + LabelFilter: cfg.LabelFilter, FQDNTemplate: cfg.FQDNTemplate, CombineFQDNAndAnnotation: cfg.CombineFQDNAndAnnotation, IgnoreHostnameAnnotation: cfg.IgnoreHostnameAnnotation, diff --git a/pkg/apis/externaldns/types.go b/pkg/apis/externaldns/types.go index 037d9bfbd..c6f96ae1a 100644 --- a/pkg/apis/externaldns/types.go +++ b/pkg/apis/externaldns/types.go @@ -47,6 +47,7 @@ type Config struct { Sources []string Namespace string AnnotationFilter string + LabelFilter string FQDNTemplate string CombineFQDNAndAnnotation bool IgnoreHostnameAnnotation bool @@ -157,6 +158,7 @@ var defaultConfig = &Config{ Sources: nil, Namespace: "", AnnotationFilter: "", + LabelFilter: "", FQDNTemplate: "", CombineFQDNAndAnnotation: false, IgnoreHostnameAnnotation: false, @@ -310,6 +312,7 @@ func (cfg *Config) ParseFlags(args []string) error { app.Flag("namespace", "Limit sources of endpoints to a specific namespace (default: all namespaces)").Default(defaultConfig.Namespace).StringVar(&cfg.Namespace) app.Flag("annotation-filter", "Filter sources managed by external-dns via annotation using label selector semantics (default: all sources)").Default(defaultConfig.AnnotationFilter).StringVar(&cfg.AnnotationFilter) + app.Flag("label-filter", "Filter sources managed by external-dns via label selector when listing all resources; currently only supported by source CRD").Default(defaultConfig.LabelFilter).StringVar(&cfg.LabelFilter) app.Flag("fqdn-template", "A templated string that's used to generate DNS names from sources that don't define a hostname themselves, or to add a hostname suffix when paired with the fake source (optional). Accepts comma separated list for multiple global FQDN.").Default(defaultConfig.FQDNTemplate).StringVar(&cfg.FQDNTemplate) app.Flag("combine-fqdn-annotation", "Combine FQDN template and Annotations instead of overwriting").BoolVar(&cfg.CombineFQDNAndAnnotation) app.Flag("ignore-hostname-annotation", "Ignore hostname annotation when generating DNS names, valid only when using fqdn-template is set (optional, default: false)").BoolVar(&cfg.IgnoreHostnameAnnotation) diff --git a/source/crd.go b/source/crd.go index 26ac77e50..a897d45aa 100644 --- a/source/crd.go +++ b/source/crd.go @@ -43,6 +43,7 @@ type crdSource struct { crdResource string codec runtime.ParameterCodec annotationFilter string + labelFilter string } func addKnownTypes(scheme *runtime.Scheme, groupVersion schema.GroupVersion) error { @@ -102,11 +103,12 @@ func NewCRDClientForAPIVersionKind(client kubernetes.Interface, kubeConfig, apiS } // NewCRDSource creates a new crdSource with the given config. 
-func NewCRDSource(crdClient rest.Interface, namespace, kind string, annotationFilter string, scheme *runtime.Scheme) (Source, error) { +func NewCRDSource(crdClient rest.Interface, namespace, kind string, annotationFilter string, labelFilter string, scheme *runtime.Scheme) (Source, error) { return &crdSource{ crdResource: strings.ToLower(kind) + "s", namespace: namespace, annotationFilter: annotationFilter, + labelFilter: labelFilter, crdClient: crdClient, codec: runtime.NewParameterCodec(scheme), }, nil @@ -119,12 +121,22 @@ func (cs *crdSource) AddEventHandler(ctx context.Context, handler func()) { func (cs *crdSource) Endpoints(ctx context.Context) ([]*endpoint.Endpoint, error) { endpoints := []*endpoint.Endpoint{} - result, err := cs.List(ctx, &metav1.ListOptions{}) + var ( + result *endpoint.DNSEndpointList + err error + ) + + if cs.labelFilter != "" { + result, err = cs.List(ctx, &metav1.ListOptions{LabelSelector: cs.labelFilter}) + } else { + result, err = cs.List(ctx, &metav1.ListOptions{}) + } if err != nil { return nil, err } result, err = cs.filterByAnnotations(result) + if err != nil { return nil, err } diff --git a/source/crd_test.go b/source/crd_test.go index c4f882990..c669b5080 100644 --- a/source/crd_test.go +++ b/source/crd_test.go @@ -57,7 +57,7 @@ func objBody(codec runtime.Encoder, obj runtime.Object) io.ReadCloser { return ioutil.NopCloser(bytes.NewReader([]byte(runtime.EncodeOrDie(codec, obj)))) } -func startCRDServerToServeTargets(endpoints []*endpoint.Endpoint, apiVersion, kind, namespace, name string, annotations map[string]string, t *testing.T) rest.Interface { +func startCRDServerToServeTargets(endpoints []*endpoint.Endpoint, apiVersion, kind, namespace, name string, annotations map[string]string, labels map[string]string, t *testing.T) rest.Interface { groupVersion, _ := schema.ParseGroupVersion(apiVersion) scheme := runtime.NewScheme() addKnownTypes(scheme, groupVersion) @@ -72,6 +72,7 @@ func startCRDServerToServeTargets(endpoints []*endpoint.Endpoint, apiVersion, ki Name: name, Namespace: namespace, Annotations: annotations, + Labels: labels, Generation: 1, }, Spec: endpoint.DNSEndpointSpec{ @@ -139,7 +140,9 @@ func testCRDSourceEndpoints(t *testing.T) { expectEndpoints bool expectError bool annotationFilter string + labelFilter string annotations map[string]string + labels map[string]string }{ { title: "invalid crd api version", @@ -308,16 +311,56 @@ func testCRDSourceEndpoints(t *testing.T) { expectEndpoints: true, expectError: false, }, + { + title: "valid crd gvk with label and non matching label filter", + registeredAPIVersion: "test.k8s.io/v1alpha1", + apiVersion: "test.k8s.io/v1alpha1", + registeredKind: "DNSEndpoint", + kind: "DNSEndpoint", + namespace: "foo", + registeredNamespace: "foo", + labels: map[string]string{"test": "that"}, + labelFilter: "test=filter_something_else", + endpoints: []*endpoint.Endpoint{ + {DNSName: "abc.example.org", + Targets: endpoint.Targets{"1.2.3.4"}, + RecordType: endpoint.RecordTypeA, + RecordTTL: 180, + }, + }, + expectEndpoints: false, + expectError: false, + }, + { + title: "valid crd gvk with label and matching label filter", + registeredAPIVersion: "test.k8s.io/v1alpha1", + apiVersion: "test.k8s.io/v1alpha1", + registeredKind: "DNSEndpoint", + kind: "DNSEndpoint", + namespace: "foo", + registeredNamespace: "foo", + labels: map[string]string{"test": "that"}, + labelFilter: "test=that", + endpoints: []*endpoint.Endpoint{ + {DNSName: "abc.example.org", + Targets: endpoint.Targets{"1.2.3.4"}, + RecordType: 
endpoint.RecordTypeA, + RecordTTL: 180, + }, + }, + expectEndpoints: true, + expectError: false, + }, } { t.Run(ti.title, func(t *testing.T) { - restClient := startCRDServerToServeTargets(ti.endpoints, ti.registeredAPIVersion, ti.registeredKind, ti.registeredNamespace, "test", ti.annotations, t) + restClient := startCRDServerToServeTargets(ti.endpoints, ti.registeredAPIVersion, ti.registeredKind, ti.registeredNamespace, "test", ti.annotations, ti.labels, t) groupVersion, err := schema.ParseGroupVersion(ti.apiVersion) require.NoError(t, err) scheme := runtime.NewScheme() addKnownTypes(scheme, groupVersion) - cs, _ := NewCRDSource(restClient, ti.namespace, ti.kind, ti.annotationFilter, scheme) + cs, _ := NewCRDSource(restClient, ti.namespace, ti.kind, ti.annotationFilter, ti.labelFilter, scheme) receivedEndpoints, err := cs.Endpoints(context.Background()) if ti.expectError { diff --git a/source/store.go b/source/store.go index a06571f82..3f3a1321e 100644 --- a/source/store.go +++ b/source/store.go @@ -42,6 +42,7 @@ var ErrSourceNotFound = errors.New("source not found") type Config struct { Namespace string AnnotationFilter string + LabelFilter string FQDNTemplate string CombineFQDNAndAnnotation bool IgnoreHostnameAnnotation bool @@ -247,7 +248,7 @@ func BuildWithConfig(source string, p ClientGenerator, cfg *Config) (Source, err if err != nil { return nil, err } - return NewCRDSource(crdClient, cfg.Namespace, cfg.CRDSourceKind, cfg.AnnotationFilter, scheme) + return NewCRDSource(crdClient, cfg.Namespace, cfg.CRDSourceKind, cfg.AnnotationFilter, cfg.LabelFilter, scheme) case "skipper-routegroup": apiServerURL := cfg.APIServerURL tokenPath := ""