mirror of https://github.com/kubernetes-sigs/external-dns.git
synced 2025-12-16 09:11:34 +01:00

Merge pull request #2109 from krmichel/master
Add support for Kong's TCPIngress with a load balancer

commit c78be00600

docs/tutorials/kong.md (new file, 95 lines)
@@ -0,0 +1,95 @@
# Configuring ExternalDNS to use the Kong TCPIngress Source

This tutorial describes how to configure ExternalDNS to use the Kong TCPIngress source.
It is meant to supplement the other provider-specific setup tutorials.

### Manifest (for clusters without RBAC enabled)

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: external-dns
spec:
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: external-dns
  template:
    metadata:
      labels:
        app: external-dns
    spec:
      containers:
      - name: external-dns
        # update this to the desired external-dns version
        image: k8s.gcr.io/external-dns/external-dns:v0.9.0
        args:
        - --source=kong-tcpingress
        - --provider=aws
        - --registry=txt
        - --txt-owner-id=my-identifier
```

### Manifest (for clusters with RBAC enabled)

The ClusterRole below could be changed if you have multiple sources.

```yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: external-dns
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: external-dns
rules:
- apiGroups: [""]
  resources: ["services","endpoints","pods"]
  verbs: ["get","watch","list"]
- apiGroups: [""]
  resources: ["nodes"]
  verbs: ["list","watch"]
- apiGroups: ["configuration.konghq.com"]
  resources: ["tcpingresses"]
  verbs: ["get","watch","list"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: external-dns-viewer
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: external-dns
subjects:
- kind: ServiceAccount
  name: external-dns
  namespace: default
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: external-dns
spec:
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: external-dns
  template:
    metadata:
      labels:
        app: external-dns
    spec:
      serviceAccountName: external-dns
      containers:
      - name: external-dns
        # update this to the desired external-dns version
        image: k8s.gcr.io/external-dns/external-dns:v0.9.0
        args:
        - --source=kong-tcpingress
        - --provider=aws
        - --registry=txt
        - --txt-owner-id=my-identifier
```
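For reference, the source derives hostnames from the `external-dns.alpha.kubernetes.io/hostname` annotation and from the `host` field of SNI-based rules on each TCPIngress, and points them at the address of the TCPIngress's load balancer. A minimal illustrative TCPIngress (not part of the original tutorial; the hostnames, service name, and ports are placeholders) might look like this:

```yaml
# Illustrative example only; adjust names, hosts, and ports to your setup.
apiVersion: configuration.konghq.com/v1beta1
kind: TCPIngress
metadata:
  name: example-tcp-ingress
  annotations:
    kubernetes.io/ingress.class: kong
    external-dns.alpha.kubernetes.io/hostname: tcp.example.com
spec:
  rules:
  - host: sni.example.com
    port: 9000
    backend:
      serviceName: example-service
      servicePort: 8080
```
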
@@ -352,7 +352,7 @@ func (cfg *Config) ParseFlags(args []string) error {
 	app.Flag("skipper-routegroup-groupversion", "The resource version for skipper routegroup").Default(source.DefaultRoutegroupVersion).StringVar(&cfg.SkipperRouteGroupVersion)

 	// Flags related to processing sources
-	app.Flag("source", "The resource types that are queried for endpoints; specify multiple times for multiple sources (required, options: service, ingress, node, fake, connector, istio-gateway, istio-virtualservice, cloudfoundry, contour-ingressroute, contour-httpproxy, gloo-proxy, crd, empty, skipper-routegroup, openshift-route, ambassador-host)").Required().PlaceHolder("source").EnumsVar(&cfg.Sources, "service", "ingress", "node", "pod", "istio-gateway", "istio-virtualservice", "cloudfoundry", "contour-ingressroute", "contour-httpproxy", "gloo-proxy", "fake", "connector", "crd", "empty", "skipper-routegroup", "openshift-route", "ambassador-host")
+	app.Flag("source", "The resource types that are queried for endpoints; specify multiple times for multiple sources (required, options: service, ingress, node, fake, connector, istio-gateway, istio-virtualservice, cloudfoundry, contour-ingressroute, contour-httpproxy, gloo-proxy, crd, empty, skipper-routegroup, openshift-route, ambassador-host, kong-tcpingress)").Required().PlaceHolder("source").EnumsVar(&cfg.Sources, "service", "ingress", "node", "pod", "istio-gateway", "istio-virtualservice", "cloudfoundry", "contour-ingressroute", "contour-httpproxy", "gloo-proxy", "fake", "connector", "crd", "empty", "skipper-routegroup", "openshift-route", "ambassador-host", "kong-tcpingress")
 	app.Flag("namespace", "Limit sources of endpoints to a specific namespace (default: all namespaces)").Default(defaultConfig.Namespace).StringVar(&cfg.Namespace)
 	app.Flag("annotation-filter", "Filter sources managed by external-dns via annotation using label selector semantics (default: all sources)").Default(defaultConfig.AnnotationFilter).StringVar(&cfg.AnnotationFilter)
 	app.Flag("label-filter", "Filter sources managed by external-dns via label selector when listing all resources; currently only supported by source CRD").Default(defaultConfig.LabelFilter).StringVar(&cfg.LabelFilter)

source/kong_tcpingress.go (new file, 455 lines)
@@ -0,0 +1,455 @@
/*
Copyright 2021 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package source

import (
	"context"
	"fmt"
	"sort"
	"time"

	"github.com/pkg/errors"
	log "github.com/sirupsen/logrus"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/dynamic/dynamicinformer"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/tools/cache"

	"sigs.k8s.io/external-dns/endpoint"
)

var kongGroupdVersionResource = schema.GroupVersionResource{
	Group:    "configuration.konghq.com",
	Version:  "v1beta1",
	Resource: "tcpingresses",
}

// kongTCPIngressSource is an implementation of Source for Kong TCPIngress objects.
type kongTCPIngressSource struct {
	annotationFilter       string
	dynamicKubeClient      dynamic.Interface
	kongTCPIngressInformer informers.GenericInformer
	kubeClient             kubernetes.Interface
	namespace              string
	unstructuredConverter  *unstructuredConverter
}

// NewKongTCPIngressSource creates a new kongTCPIngressSource with the given config.
func NewKongTCPIngressSource(dynamicKubeClient dynamic.Interface, kubeClient kubernetes.Interface, namespace string, annotationFilter string) (Source, error) {
	var err error

	// Use shared informer to listen for add/update/delete of Host in the specified namespace.
	// Set resync period to 0, to prevent processing when nothing has changed.
	informerFactory := dynamicinformer.NewFilteredDynamicSharedInformerFactory(dynamicKubeClient, 0, namespace, nil)
	kongTCPIngressInformer := informerFactory.ForResource(kongGroupdVersionResource)

	// Add default resource event handlers to properly initialize informer.
	kongTCPIngressInformer.Informer().AddEventHandler(
		cache.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
			},
		},
	)

	// TODO informer is not explicitly stopped since controller is not passing in its channel.
	informerFactory.Start(wait.NeverStop)

	// wait for the local cache to be populated.
	err = poll(time.Second, 60*time.Second, func() (bool, error) {
		return kongTCPIngressInformer.Informer().HasSynced(), nil
	})
	if err != nil {
		return nil, errors.Wrapf(err, "failed to sync cache")
	}

	uc, err := newKongUnstructuredConverter()
	if err != nil {
		return nil, errors.Wrapf(err, "failed to setup Unstructured Converter")
	}

	return &kongTCPIngressSource{
		annotationFilter:       annotationFilter,
		dynamicKubeClient:      dynamicKubeClient,
		kongTCPIngressInformer: kongTCPIngressInformer,
		kubeClient:             kubeClient,
		namespace:              namespace,
		unstructuredConverter:  uc,
	}, nil
}

// Endpoints returns endpoint objects for each host-target combination that should be processed.
// Retrieves all TCPIngresses in the source's namespace(s).
func (sc *kongTCPIngressSource) Endpoints(ctx context.Context) ([]*endpoint.Endpoint, error) {
	tis, err := sc.kongTCPIngressInformer.Lister().ByNamespace(sc.namespace).List(labels.Everything())
	if err != nil {
		return nil, err
	}

	var tcpIngresses []*TCPIngress
	for _, tcpIngressObj := range tis {
		unstructuredHost, ok := tcpIngressObj.(*unstructured.Unstructured)
		if !ok {
			return nil, errors.New("could not convert")
		}

		tcpIngress := &TCPIngress{}
		err := sc.unstructuredConverter.scheme.Convert(unstructuredHost, tcpIngress, nil)
		if err != nil {
			return nil, err
		}
		tcpIngresses = append(tcpIngresses, tcpIngress)
	}

	tcpIngresses, err = sc.filterByAnnotations(tcpIngresses)
	if err != nil {
		return nil, errors.Wrap(err, "failed to filter TCPIngresses")
	}

	var endpoints []*endpoint.Endpoint
	for _, tcpIngress := range tcpIngresses {
		var targets endpoint.Targets
		for _, lb := range tcpIngress.Status.LoadBalancer.Ingress {
			if lb.IP != "" {
				targets = append(targets, lb.IP)
			}
			if lb.Hostname != "" {
				targets = append(targets, lb.Hostname)
			}
		}

		fullname := fmt.Sprintf("%s/%s", tcpIngress.Namespace, tcpIngress.Name)

		ingressEndpoints, err := sc.endpointsFromTCPIngress(tcpIngress, targets)
		if err != nil {
			return nil, err
		}
		if len(ingressEndpoints) == 0 {
			log.Debugf("No endpoints could be generated from Host %s", fullname)
			continue
		}

		log.Debugf("Endpoints generated from TCPIngress: %s: %v", fullname, ingressEndpoints)
		sc.setResourceLabel(tcpIngress, ingressEndpoints)
		sc.setDualstackLabel(tcpIngress, ingressEndpoints)
		endpoints = append(endpoints, ingressEndpoints...)
	}

	for _, ep := range endpoints {
		sort.Sort(ep.Targets)
	}

	return endpoints, nil
}

// filterByAnnotations filters a list of TCPIngresses by a given annotation selector.
func (sc *kongTCPIngressSource) filterByAnnotations(tcpIngresses []*TCPIngress) ([]*TCPIngress, error) {
	labelSelector, err := metav1.ParseToLabelSelector(sc.annotationFilter)
	if err != nil {
		return nil, err
	}
	selector, err := metav1.LabelSelectorAsSelector(labelSelector)
	if err != nil {
		return nil, err
	}

	// empty filter returns original list
	if selector.Empty() {
		return tcpIngresses, nil
	}

	filteredList := []*TCPIngress{}

	for _, tcpIngress := range tcpIngresses {
		// convert the TCPIngress's annotations to an equivalent label selector
		annotations := labels.Set(tcpIngress.Annotations)

		// include TCPIngress if its annotations match the selector
		if selector.Matches(annotations) {
			filteredList = append(filteredList, tcpIngress)
		}
	}

	return filteredList, nil
}

func (sc *kongTCPIngressSource) setResourceLabel(tcpIngress *TCPIngress, endpoints []*endpoint.Endpoint) {
	for _, ep := range endpoints {
		ep.Labels[endpoint.ResourceLabelKey] = fmt.Sprintf("tcpingress/%s/%s", tcpIngress.Namespace, tcpIngress.Name)
	}
}

func (sc *kongTCPIngressSource) setDualstackLabel(tcpIngress *TCPIngress, endpoints []*endpoint.Endpoint) {
	val, ok := tcpIngress.Annotations[ALBDualstackAnnotationKey]
	if ok && val == ALBDualstackAnnotationValue {
		log.Debugf("Adding dualstack label to TCPIngress %s/%s.", tcpIngress.Namespace, tcpIngress.Name)
		for _, ep := range endpoints {
			ep.Labels[endpoint.DualstackLabelKey] = "true"
		}
	}
}

// endpointsFromTCPIngress extracts the endpoints from a TCPIngress object
func (sc *kongTCPIngressSource) endpointsFromTCPIngress(tcpIngress *TCPIngress, targets endpoint.Targets) ([]*endpoint.Endpoint, error) {
	var endpoints []*endpoint.Endpoint

	providerSpecific, setIdentifier := getProviderSpecificAnnotations(tcpIngress.Annotations)

	ttl, err := getTTLFromAnnotations(tcpIngress.Annotations)
	if err != nil {
		return nil, err
	}

	hostnameList := getHostnamesFromAnnotations(tcpIngress.Annotations)
	for _, hostname := range hostnameList {
		endpoints = append(endpoints, endpointsForHostname(hostname, targets, ttl, providerSpecific, setIdentifier)...)
	}

	if tcpIngress.Spec.Rules != nil {
		for _, rule := range tcpIngress.Spec.Rules {
			if rule.Host != "" {
				endpoints = append(endpoints, endpointsForHostname(rule.Host, targets, ttl, providerSpecific, setIdentifier)...)
			}
		}
	}

	return endpoints, nil
}

func (sc *kongTCPIngressSource) AddEventHandler(ctx context.Context, handler func()) {
	log.Debug("Adding event handler for TCPIngress")

	// Right now there is no way to remove event handler from informer, see:
	// https://github.com/kubernetes/kubernetes/issues/79610
	sc.kongTCPIngressInformer.Informer().AddEventHandler(
		cache.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				handler()
			},
			UpdateFunc: func(old interface{}, new interface{}) {
				handler()
			},
			DeleteFunc: func(obj interface{}) {
				handler()
			},
		},
	)
}

// newKongUnstructuredConverter returns a new unstructuredConverter initialized with the Kong TCPIngress types.
func newKongUnstructuredConverter() (*unstructuredConverter, error) {
	uc := &unstructuredConverter{
		scheme: runtime.NewScheme(),
	}

	// Add the core types we need
	uc.scheme.AddKnownTypes(kongGroupdVersionResource.GroupVersion(), &TCPIngress{}, &TCPIngressList{})
	if err := scheme.AddToScheme(uc.scheme); err != nil {
		return nil, err
	}

	return uc, nil
}

// Kong types based on https://github.com/Kong/kubernetes-ingress-controller/blob/v1.2.0/pkg/apis/configuration/v1beta1/types.go to facilitate testing.
// Importing them from the Kong repo as a dependency required upgrading k8s.io/client-go and k8s.io/apimachinery, which seemed to cause several
// changes in how the mock clients were working and resulted in a bunch of failures in other tests.
// If that is dealt with at some point, the types below can be removed and replaced with an actual import.

type TCPIngress struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   tcpIngressSpec   `json:"spec,omitempty"`
	Status tcpIngressStatus `json:"status,omitempty"`
}

type TCPIngressList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []TCPIngress `json:"items"`
}

type tcpIngressSpec struct {
	Rules []tcpIngressRule `json:"rules,omitempty"`
	TLS   []tcpIngressTLS  `json:"tls,omitempty"`
}

type tcpIngressTLS struct {
	Hosts      []string `json:"hosts,omitempty"`
	SecretName string   `json:"secretName,omitempty"`
}

type tcpIngressStatus struct {
	LoadBalancer corev1.LoadBalancerStatus `json:"loadBalancer,omitempty"`
}

type tcpIngressRule struct {
	Host    string            `json:"host,omitempty"`
	Port    int               `json:"port,omitempty"`
	Backend tcpIngressBackend `json:"backend"`
}

type tcpIngressBackend struct {
	ServiceName string `json:"serviceName"`
	ServicePort int    `json:"servicePort"`
}

func (in *tcpIngressBackend) DeepCopyInto(out *tcpIngressBackend) {
	*out = *in
}

func (in *tcpIngressBackend) DeepCopy() *tcpIngressBackend {
	if in == nil {
		return nil
	}
	out := new(tcpIngressBackend)
	in.DeepCopyInto(out)
	return out
}

func (in *tcpIngressRule) DeepCopyInto(out *tcpIngressRule) {
	*out = *in
	out.Backend = in.Backend
}

func (in *tcpIngressRule) DeepCopy() *tcpIngressRule {
	if in == nil {
		return nil
	}
	out := new(tcpIngressRule)
	in.DeepCopyInto(out)
	return out
}

func (in *tcpIngressSpec) DeepCopyInto(out *tcpIngressSpec) {
	*out = *in
	if in.Rules != nil {
		in, out := &in.Rules, &out.Rules
		*out = make([]tcpIngressRule, len(*in))
		copy(*out, *in)
	}
	if in.TLS != nil {
		in, out := &in.TLS, &out.TLS
		*out = make([]tcpIngressTLS, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

func (in *tcpIngressSpec) DeepCopy() *tcpIngressSpec {
	if in == nil {
		return nil
	}
	out := new(tcpIngressSpec)
	in.DeepCopyInto(out)
	return out
}

func (in *tcpIngressStatus) DeepCopyInto(out *tcpIngressStatus) {
	*out = *in
	in.LoadBalancer.DeepCopyInto(&out.LoadBalancer)
}

func (in *tcpIngressStatus) DeepCopy() *tcpIngressStatus {
	if in == nil {
		return nil
	}
	out := new(tcpIngressStatus)
	in.DeepCopyInto(out)
	return out
}

func (in *tcpIngressTLS) DeepCopyInto(out *tcpIngressTLS) {
	*out = *in
	if in.Hosts != nil {
		in, out := &in.Hosts, &out.Hosts
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
}

func (in *tcpIngressTLS) DeepCopy() *tcpIngressTLS {
	if in == nil {
		return nil
	}
	out := new(tcpIngressTLS)
	in.DeepCopyInto(out)
	return out
}

func (in *TCPIngress) DeepCopyInto(out *TCPIngress) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
}

func (in *TCPIngress) DeepCopy() *TCPIngress {
	if in == nil {
		return nil
	}
	out := new(TCPIngress)
	in.DeepCopyInto(out)
	return out
}

func (in *TCPIngress) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

func (in *TCPIngressList) DeepCopyInto(out *TCPIngressList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]TCPIngress, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

func (in *TCPIngressList) DeepCopy() *TCPIngressList {
	if in == nil {
		return nil
	}
	out := new(TCPIngressList)
	in.DeepCopyInto(out)
	return out
}

func (in *TCPIngressList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

source/kong_tcpingress_test.go (new file, 253 lines)
@@ -0,0 +1,253 @@
/*
Copyright 2021 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package source

import (
	"context"
	"encoding/json"
	"github.com/stretchr/testify/assert"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	fakeDynamic "k8s.io/client-go/dynamic/fake"
	fakeKube "k8s.io/client-go/kubernetes/fake"
	"sigs.k8s.io/external-dns/endpoint"
	"testing"
)

// This is a compile-time validation that kongTCPIngressSource is a Source.
var _ Source = &kongTCPIngressSource{}

const defaultKongNamespace = "kong"

func TestKongTCPIngressEndpoints(t *testing.T) {
	for _, ti := range []struct {
		title    string
		tcpProxy TCPIngress
		expected []*endpoint.Endpoint
	}{
		{
			title: "TCPIngress with hostname annotation",
			tcpProxy: TCPIngress{
				TypeMeta: metav1.TypeMeta{
					APIVersion: kongGroupdVersionResource.GroupVersion().String(),
					Kind:       "TCPIngress",
				},
				ObjectMeta: metav1.ObjectMeta{
					Name:      "tcp-ingress-annotation",
					Namespace: defaultKongNamespace,
					Annotations: map[string]string{
						"external-dns.alpha.kubernetes.io/hostname": "a.example.com",
						"kubernetes.io/ingress.class":               "kong",
					},
				},
				Spec: tcpIngressSpec{
					Rules: []tcpIngressRule{
						{
							Port: 30000,
						},
						{
							Port: 30001,
						},
					},
				},
				Status: tcpIngressStatus{
					LoadBalancer: corev1.LoadBalancerStatus{
						Ingress: []corev1.LoadBalancerIngress{
							{
								Hostname: "a691234567a314e71861a4303f06a3bd-1291189659.us-east-1.elb.amazonaws.com",
							},
						},
					},
				},
			},
			expected: []*endpoint.Endpoint{
				{
					DNSName:    "a.example.com",
					Targets:    []string{"a691234567a314e71861a4303f06a3bd-1291189659.us-east-1.elb.amazonaws.com"},
					RecordType: endpoint.RecordTypeCNAME,
					RecordTTL:  0,
					Labels: endpoint.Labels{
						"resource": "tcpingress/kong/tcp-ingress-annotation",
					},
					ProviderSpecific: endpoint.ProviderSpecific{},
				},
			},
		},
		{
			title: "TCPIngress using SNI",
			tcpProxy: TCPIngress{
				TypeMeta: metav1.TypeMeta{
					APIVersion: kongGroupdVersionResource.GroupVersion().String(),
					Kind:       "TCPIngress",
				},
				ObjectMeta: metav1.ObjectMeta{
					Name:      "tcp-ingress-sni",
					Namespace: defaultKongNamespace,
					Annotations: map[string]string{
						"kubernetes.io/ingress.class": "kong",
					},
				},
				Spec: tcpIngressSpec{
					Rules: []tcpIngressRule{
						{
							Port: 30002,
							Host: "b.example.com",
						},
						{
							Port: 30003,
							Host: "c.example.com",
						},
					},
				},
				Status: tcpIngressStatus{
					LoadBalancer: corev1.LoadBalancerStatus{
						Ingress: []corev1.LoadBalancerIngress{
							{
								Hostname: "a123456769a314e71861a4303f06a3bd-1291189659.us-east-1.elb.amazonaws.com",
							},
						},
					},
				},
			},
			expected: []*endpoint.Endpoint{
				{
					DNSName:    "b.example.com",
					Targets:    []string{"a123456769a314e71861a4303f06a3bd-1291189659.us-east-1.elb.amazonaws.com"},
					RecordType: endpoint.RecordTypeCNAME,
					RecordTTL:  0,
					Labels: endpoint.Labels{
						"resource": "tcpingress/kong/tcp-ingress-sni",
					},
					ProviderSpecific: endpoint.ProviderSpecific{},
				},
				{
					DNSName:    "c.example.com",
					Targets:    []string{"a123456769a314e71861a4303f06a3bd-1291189659.us-east-1.elb.amazonaws.com"},
					RecordType: endpoint.RecordTypeCNAME,
					Labels: endpoint.Labels{
						"resource": "tcpingress/kong/tcp-ingress-sni",
					},
					ProviderSpecific: endpoint.ProviderSpecific{},
				},
			},
		},
		{
			title: "TCPIngress with hostname annotation and using SNI",
			tcpProxy: TCPIngress{
				TypeMeta: metav1.TypeMeta{
					APIVersion: kongGroupdVersionResource.GroupVersion().String(),
					Kind:       "TCPIngress",
				},
				ObjectMeta: metav1.ObjectMeta{
					Name:      "tcp-ingress-both",
					Namespace: defaultKongNamespace,
					Annotations: map[string]string{
						"external-dns.alpha.kubernetes.io/hostname": "d.example.com",
						"kubernetes.io/ingress.class":               "kong",
					},
				},
				Spec: tcpIngressSpec{
					Rules: []tcpIngressRule{
						{
							Port: 30004,
							Host: "e.example.com",
						},
						{
							Port: 30005,
							Host: "f.example.com",
						},
					},
				},
				Status: tcpIngressStatus{
					LoadBalancer: corev1.LoadBalancerStatus{
						Ingress: []corev1.LoadBalancerIngress{
							{
								Hostname: "a12e71861a4303f063456769a314a3bd-1291189659.us-east-1.elb.amazonaws.com",
							},
						},
					},
				},
			},
			expected: []*endpoint.Endpoint{
				{
					DNSName:    "d.example.com",
					Targets:    []string{"a12e71861a4303f063456769a314a3bd-1291189659.us-east-1.elb.amazonaws.com"},
					RecordType: endpoint.RecordTypeCNAME,
					RecordTTL:  0,
					Labels: endpoint.Labels{
						"resource": "tcpingress/kong/tcp-ingress-both",
					},
					ProviderSpecific: endpoint.ProviderSpecific{},
				},
				{
					DNSName:    "e.example.com",
					Targets:    []string{"a12e71861a4303f063456769a314a3bd-1291189659.us-east-1.elb.amazonaws.com"},
					RecordType: endpoint.RecordTypeCNAME,
					RecordTTL:  0,
					Labels: endpoint.Labels{
						"resource": "tcpingress/kong/tcp-ingress-both",
					},
					ProviderSpecific: endpoint.ProviderSpecific{},
				},
				{
					DNSName:    "f.example.com",
					Targets:    []string{"a12e71861a4303f063456769a314a3bd-1291189659.us-east-1.elb.amazonaws.com"},
					RecordType: endpoint.RecordTypeCNAME,
					RecordTTL:  0,
					Labels: endpoint.Labels{
						"resource": "tcpingress/kong/tcp-ingress-both",
					},
					ProviderSpecific: endpoint.ProviderSpecific{},
				},
			},
		},
	} {
		t.Run(ti.title, func(t *testing.T) {
			fakeKubernetesClient := fakeKube.NewSimpleClientset()
			scheme := runtime.NewScheme()
			scheme.AddKnownTypes(kongGroupdVersionResource.GroupVersion(), &TCPIngress{}, &TCPIngressList{})
			fakeDynamicClient := fakeDynamic.NewSimpleDynamicClient(scheme)

			tcpi := unstructured.Unstructured{}

			tcpIngressAsJSON, err := json.Marshal(ti.tcpProxy)
			assert.NoError(t, err)

			assert.NoError(t, tcpi.UnmarshalJSON(tcpIngressAsJSON))

			// Create proxy resources
			_, err = fakeDynamicClient.Resource(kongGroupdVersionResource).Namespace(defaultKongNamespace).Create(context.Background(), &tcpi, metav1.CreateOptions{})
			assert.NoError(t, err)

			source, err := NewKongTCPIngressSource(fakeDynamicClient, fakeKubernetesClient, defaultKongNamespace, "kubernetes.io/ingress.class=kong")
			assert.NoError(t, err)
			assert.NotNil(t, source)

			count := &unstructured.UnstructuredList{}
			for len(count.Items) < 1 {
				count, _ = fakeDynamicClient.Resource(kongGroupdVersionResource).Namespace(defaultKongNamespace).List(context.Background(), metav1.ListOptions{})
			}

			endpoints, err := source.Endpoints(context.Background())
			assert.NoError(t, err)
			assert.Len(t, endpoints, len(ti.expected))
			assert.Equal(t, endpoints, ti.expected)
		})
	}
}
@@ -288,6 +288,16 @@ func BuildWithConfig(source string, p ClientGenerator, cfg *Config) (Source, err
 			token = restConfig.BearerToken
 		}
 		return NewRouteGroupSource(cfg.RequestTimeout, token, tokenPath, apiServerURL, cfg.Namespace, cfg.AnnotationFilter, cfg.FQDNTemplate, cfg.SkipperRouteGroupVersion, cfg.CombineFQDNAndAnnotation, cfg.IgnoreHostnameAnnotation)
+	case "kong-tcpingress":
+		kubernetesClient, err := p.KubeClient()
+		if err != nil {
+			return nil, err
+		}
+		dynamicClient, err := p.DynamicKubernetesClient()
+		if err != nil {
+			return nil, err
+		}
+		return NewKongTCPIngressSource(dynamicClient, kubernetesClient, cfg.Namespace, cfg.AnnotationFilter)
 	}
 	return nil, ErrSourceNotFound
 }
@@ -96,9 +96,9 @@ func (suite *ByNamesTestSuite) TestAllInitialized() {
 	mockClientGenerator.On("IstioClient").Return(NewFakeConfigStore(), nil)
 	mockClientGenerator.On("DynamicKubernetesClient").Return(fakeDynamic, nil)

-	sources, err := ByNames(mockClientGenerator, []string{"service", "ingress", "istio-gateway", "contour-ingressroute", "contour-httpproxy", "fake"}, minimalConfig)
+	sources, err := ByNames(mockClientGenerator, []string{"service", "ingress", "istio-gateway", "contour-ingressroute", "contour-httpproxy", "kong-tcpingress", "fake"}, minimalConfig)
 	suite.NoError(err, "should not generate errors")
-	suite.Len(sources, 6, "should generate all six sources")
+	suite.Len(sources, 7, "should generate all seven sources")
 }

 func (suite *ByNamesTestSuite) TestOnlyFake() {
@@ -135,6 +135,9 @@ func (suite *ByNamesTestSuite) TestKubeClientFails() {

 	_, err = ByNames(mockClientGenerator, []string{"contour-ingressroute"}, minimalConfig)
 	suite.Error(err, "should return an error if kubernetes client cannot be created")
+
+	_, err = ByNames(mockClientGenerator, []string{"kong-tcpingress"}, minimalConfig)
+	suite.Error(err, "should return an error if kubernetes client cannot be created")
 }

 func (suite *ByNamesTestSuite) TestIstioClientFails() {