Mirror of https://github.com/kubernetes-sigs/external-dns.git (synced 2025-08-06 09:36:58 +02:00)
Contour HTTPProxy support
parent 3a61439cd1
commit 5beb528c32
pkg/apis/externaldns/types.go
@@ -300,7 +300,7 @@ func (cfg *Config) ParseFlags(args []string) error {
 	app.Flag("skipper-routegroup-groupversion", "The resource version for skipper routegroup").Default(source.DefaultRoutegroupVersion).StringVar(&cfg.SkipperRouteGroupVersion)
 
 	// Flags related to processing sources
-	app.Flag("source", "The resource types that are queried for endpoints; specify multiple times for multiple sources (required, options: service, ingress, node, fake, connector, istio-gateway, istio-virtualservice, cloudfoundry, contour-ingressroute, crd, empty, skipper-routegroup,openshift-route)").Required().PlaceHolder("source").EnumsVar(&cfg.Sources, "service", "ingress", "node", "istio-gateway", "istio-virtualservice", "cloudfoundry", "contour-ingressroute", "fake", "connector", "crd", "empty", "skipper-routegroup", "openshift-route")
+	app.Flag("source", "The resource types that are queried for endpoints; specify multiple times for multiple sources (required, options: service, ingress, node, fake, connector, istio-gateway, istio-virtualservice, cloudfoundry, contour-ingressroute, contour-httpproxy, crd, empty, skipper-routegroup,openshift-route)").Required().PlaceHolder("source").EnumsVar(&cfg.Sources, "service", "ingress", "node", "istio-gateway", "istio-virtualservice", "cloudfoundry", "contour-ingressroute", "contour-httpproxy", "fake", "connector", "crd", "empty", "skipper-routegroup", "openshift-route")
 
 	app.Flag("namespace", "Limit sources of endpoints to a specific namespace (default: all namespaces)").Default(defaultConfig.Namespace).StringVar(&cfg.Namespace)
 	app.Flag("annotation-filter", "Filter sources managed by external-dns via annotation using label selector semantics (default: all sources)").Default(defaultConfig.AnnotationFilter).StringVar(&cfg.AnnotationFilter)
source/httpproxy.go (new file, 340 lines)
@@ -0,0 +1,340 @@
/*
Copyright 2020 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package source

import (
	"bytes"
	"context"
	"fmt"
	"sort"
	"strings"
	"text/template"
	"time"

	"github.com/pkg/errors"
	projectcontour "github.com/projectcontour/contour/apis/projectcontour/v1"
	log "github.com/sirupsen/logrus"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/dynamic/dynamicinformer"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/tools/cache"

	"sigs.k8s.io/external-dns/endpoint"
)

// httpProxySource is an implementation of Source for ProjectContour HTTPProxy objects.
// The HTTPProxy implementation uses the spec.virtualHost.fqdn value for the hostname.
// Use targetAnnotationKey to explicitly set Endpoint targets.
type httpProxySource struct {
	dynamicKubeClient        dynamic.Interface
	namespace                string
	annotationFilter         string
	fqdnTemplate             *template.Template
	combineFQDNAnnotation    bool
	ignoreHostnameAnnotation bool
	httpProxyInformer        informers.GenericInformer
	unstructuredConverter    *UnstructuredConverter
}

// NewContourHTTPProxySource creates a new httpProxySource with the given config.
func NewContourHTTPProxySource(
	dynamicKubeClient dynamic.Interface,
	namespace string,
	annotationFilter string,
	fqdnTemplate string,
	combineFqdnAnnotation bool,
	ignoreHostnameAnnotation bool,
) (Source, error) {
	var (
		tmpl *template.Template
		err  error
	)
	if fqdnTemplate != "" {
		tmpl, err = template.New("endpoint").Funcs(template.FuncMap{
			"trimPrefix": strings.TrimPrefix,
		}).Parse(fqdnTemplate)
		if err != nil {
			return nil, err
		}
	}

	// Use shared informer to listen for add/update/delete of HTTPProxies in the specified namespace.
	// Set resync period to 0, to prevent processing when nothing has changed.
	informerFactory := dynamicinformer.NewFilteredDynamicSharedInformerFactory(dynamicKubeClient, 0, namespace, nil)
	httpProxyInformer := informerFactory.ForResource(projectcontour.HTTPProxyGVR)

	// Add default resource event handlers to properly initialize informer.
	httpProxyInformer.Informer().AddEventHandler(
		cache.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
			},
		},
	)

	// TODO informer is not explicitly stopped since controller is not passing in its channel.
	informerFactory.Start(wait.NeverStop)

	// wait for the local cache to be populated.
	err = poll(time.Second, 60*time.Second, func() (bool, error) {
		return httpProxyInformer.Informer().HasSynced(), nil
	})
	if err != nil {
		return nil, fmt.Errorf("failed to sync cache: %v", err)
	}

	uc, err := NewUnstructuredConverter()
	if err != nil {
		return nil, fmt.Errorf("failed to setup Unstructured Converter: %v", err)
	}

	return &httpProxySource{
		dynamicKubeClient:        dynamicKubeClient,
		namespace:                namespace,
		annotationFilter:         annotationFilter,
		fqdnTemplate:             tmpl,
		combineFQDNAnnotation:    combineFqdnAnnotation,
		ignoreHostnameAnnotation: ignoreHostnameAnnotation,
		httpProxyInformer:        httpProxyInformer,
		unstructuredConverter:    uc,
	}, nil
}

// Endpoints returns endpoint objects for each host-target combination that should be processed.
// Retrieves all HTTPProxy resources in the source's namespace(s).
func (sc *httpProxySource) Endpoints(ctx context.Context) ([]*endpoint.Endpoint, error) {
	hps, err := sc.httpProxyInformer.Lister().ByNamespace(sc.namespace).List(labels.Everything())
	if err != nil {
		return nil, err
	}

	// Convert to []*projectcontour.HTTPProxy
	var httpProxies []*projectcontour.HTTPProxy
	for _, hp := range hps {
		unstructuredHP, ok := hp.(*unstructured.Unstructured)
		if !ok {
			return nil, errors.New("could not convert")
		}

		hpConverted := &projectcontour.HTTPProxy{}
		err := sc.unstructuredConverter.scheme.Convert(unstructuredHP, hpConverted, nil)
		if err != nil {
			return nil, err
		}
		httpProxies = append(httpProxies, hpConverted)
	}

	httpProxies, err = sc.filterByAnnotations(httpProxies)
	if err != nil {
		return nil, err
	}

	endpoints := []*endpoint.Endpoint{}

	for _, hp := range httpProxies {
		// Check controller annotation to see if we are responsible.
		controller, ok := hp.Annotations[controllerAnnotationKey]
		if ok && controller != controllerAnnotationValue {
			log.Debugf("Skipping HTTPProxy %s/%s because controller value does not match, found: %s, required: %s",
				hp.Namespace, hp.Name, controller, controllerAnnotationValue)
			continue
		} else if hp.Status.CurrentStatus != "valid" {
			log.Debugf("Skipping HTTPProxy %s/%s because it is not valid", hp.Namespace, hp.Name)
			continue
		}

		hpEndpoints, err := sc.endpointsFromHTTPProxy(hp)
		if err != nil {
			return nil, err
		}

		// apply template if fqdn is missing on HTTPProxy
		if (sc.combineFQDNAnnotation || len(hpEndpoints) == 0) && sc.fqdnTemplate != nil {
			tmplEndpoints, err := sc.endpointsFromTemplate(hp)
			if err != nil {
				return nil, err
			}

			if sc.combineFQDNAnnotation {
				hpEndpoints = append(hpEndpoints, tmplEndpoints...)
			} else {
				hpEndpoints = tmplEndpoints
			}
		}

		if len(hpEndpoints) == 0 {
			log.Debugf("No endpoints could be generated from HTTPProxy %s/%s", hp.Namespace, hp.Name)
			continue
		}

		log.Debugf("Endpoints generated from HTTPProxy: %s/%s: %v", hp.Namespace, hp.Name, hpEndpoints)
		sc.setResourceLabel(hp, hpEndpoints)
		endpoints = append(endpoints, hpEndpoints...)
	}

	for _, ep := range endpoints {
		sort.Sort(ep.Targets)
	}

	return endpoints, nil
}

func (sc *httpProxySource) endpointsFromTemplate(httpProxy *projectcontour.HTTPProxy) ([]*endpoint.Endpoint, error) {
	// Process the whole template string
	var buf bytes.Buffer
	err := sc.fqdnTemplate.Execute(&buf, httpProxy)
	if err != nil {
		return nil, fmt.Errorf("failed to apply template on HTTPProxy %s/%s: %v", httpProxy.Namespace, httpProxy.Name, err)
	}

	hostnames := buf.String()

	ttl, err := getTTLFromAnnotations(httpProxy.Annotations)
	if err != nil {
		log.Warn(err)
	}

	targets := getTargetsFromTargetAnnotation(httpProxy.Annotations)

	if len(targets) == 0 {
		for _, lb := range httpProxy.Status.LoadBalancer.Ingress {
			if lb.IP != "" {
				targets = append(targets, lb.IP)
			}
			if lb.Hostname != "" {
				targets = append(targets, lb.Hostname)
			}
		}
	}

	providerSpecific, setIdentifier := getProviderSpecificAnnotations(httpProxy.Annotations)

	var endpoints []*endpoint.Endpoint
	// splits the FQDN template and removes the trailing periods
	hostnameList := strings.Split(strings.Replace(hostnames, " ", "", -1), ",")
	for _, hostname := range hostnameList {
		hostname = strings.TrimSuffix(hostname, ".")
		endpoints = append(endpoints, endpointsForHostname(hostname, targets, ttl, providerSpecific, setIdentifier)...)
	}
	return endpoints, nil
}

// filterByAnnotations filters a list of configs by a given annotation selector.
func (sc *httpProxySource) filterByAnnotations(httpProxies []*projectcontour.HTTPProxy) ([]*projectcontour.HTTPProxy, error) {
	labelSelector, err := metav1.ParseToLabelSelector(sc.annotationFilter)
	if err != nil {
		return nil, err
	}
	selector, err := metav1.LabelSelectorAsSelector(labelSelector)
	if err != nil {
		return nil, err
	}

	// empty filter returns original list
	if selector.Empty() {
		return httpProxies, nil
	}

	filteredList := []*projectcontour.HTTPProxy{}

	for _, httpProxy := range httpProxies {
		// convert the HTTPProxy's annotations to an equivalent label selector
		annotations := labels.Set(httpProxy.Annotations)

		// include HTTPProxy if its annotations match the selector
		if selector.Matches(annotations) {
			filteredList = append(filteredList, httpProxy)
		}
	}

	return filteredList, nil
}

func (sc *httpProxySource) setResourceLabel(httpProxy *projectcontour.HTTPProxy, endpoints []*endpoint.Endpoint) {
	for _, ep := range endpoints {
		ep.Labels[endpoint.ResourceLabelKey] = fmt.Sprintf("HTTPProxy/%s/%s", httpProxy.Namespace, httpProxy.Name)
	}
}

// endpointsFromHTTPProxy extracts the endpoints from a Contour HTTPProxy object
func (sc *httpProxySource) endpointsFromHTTPProxy(httpProxy *projectcontour.HTTPProxy) ([]*endpoint.Endpoint, error) {
	if httpProxy.Status.CurrentStatus != "valid" {
		log.Warn(errors.Errorf("cannot generate endpoints for HTTPProxy with status %s", httpProxy.Status.CurrentStatus))
		return nil, nil
	}

	var endpoints []*endpoint.Endpoint

	ttl, err := getTTLFromAnnotations(httpProxy.Annotations)
	if err != nil {
		log.Warn(err)
	}

	targets := getTargetsFromTargetAnnotation(httpProxy.Annotations)

	if len(targets) == 0 {
		for _, lb := range httpProxy.Status.LoadBalancer.Ingress {
			if lb.IP != "" {
				targets = append(targets, lb.IP)
			}
			if lb.Hostname != "" {
				targets = append(targets, lb.Hostname)
			}
		}
	}

	providerSpecific, setIdentifier := getProviderSpecificAnnotations(httpProxy.Annotations)

	if virtualHost := httpProxy.Spec.VirtualHost; virtualHost != nil {
		if fqdn := virtualHost.Fqdn; fqdn != "" {
			endpoints = append(endpoints, endpointsForHostname(fqdn, targets, ttl, providerSpecific, setIdentifier)...)
		}
	}

	// Skip endpoints if we do not want entries from annotations
	if !sc.ignoreHostnameAnnotation {
		hostnameList := getHostnamesFromAnnotations(httpProxy.Annotations)
		for _, hostname := range hostnameList {
			endpoints = append(endpoints, endpointsForHostname(hostname, targets, ttl, providerSpecific, setIdentifier)...)
		}
	}

	return endpoints, nil
}

func (sc *httpProxySource) AddEventHandler(ctx context.Context, handler func()) {
	log.Debug("Adding event handler for httpproxy")

	// Right now there is no way to remove event handler from informer, see:
	// https://github.com/kubernetes/kubernetes/issues/79610
	sc.httpProxyInformer.Informer().AddEventHandler(
		cache.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				handler()
			},
			UpdateFunc: func(old interface{}, new interface{}) {
				handler()
			},
			DeleteFunc: func(obj interface{}) {
				handler()
			},
		},
	)
}
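For orientation, below is a rough, out-of-tree sketch of how this new source could be driven directly. It is not part of the commit; the local kubeconfig path, the "default" namespace, and the empty annotation filter and FQDN template are assumptions chosen only for illustration.

// Hypothetical out-of-tree example: build the HTTPProxy source against a real
// cluster and print the endpoints it would hand to the external-dns controller.
package main

import (
	"context"
	"fmt"
	"log"

	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/tools/clientcmd"

	"sigs.k8s.io/external-dns/source"
)

func main() {
	// Load the local kubeconfig (assumption: ~/.kube/config).
	restCfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		log.Fatal(err)
	}
	dynClient, err := dynamic.NewForConfig(restCfg)
	if err != nil {
		log.Fatal(err)
	}

	// Arguments mirror what the flag parser passes: namespace, annotation filter,
	// FQDN template, combine-FQDN-and-annotation, ignore-hostname-annotation.
	src, err := source.NewContourHTTPProxySource(dynClient, "default", "", "", false, false)
	if err != nil {
		log.Fatal(err)
	}

	endpoints, err := src.Endpoints(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	for _, ep := range endpoints {
		// e.g. "app.example.com -> [203.0.113.10]" once Contour marks the HTTPProxy valid.
		fmt.Printf("%s -> %v\n", ep.DNSName, ep.Targets)
	}
}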
source/httpproxy_test.go (new file, 1081 lines)
File diff suppressed because it is too large.
source/ingressroute.go
@@ -26,18 +26,16 @@ import (
 	"time"
 
 	"github.com/pkg/errors"
-	contourapi "github.com/projectcontour/contour/apis/contour/v1beta1"
+	contour "github.com/projectcontour/contour/apis/contour/v1beta1"
 	log "github.com/sirupsen/logrus"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/labels"
-	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/client-go/dynamic"
 	"k8s.io/client-go/dynamic/dynamicinformer"
 	"k8s.io/client-go/informers"
 	"k8s.io/client-go/kubernetes"
-	"k8s.io/client-go/kubernetes/scheme"
 	"k8s.io/client-go/tools/cache"
 
 	"sigs.k8s.io/external-dns/endpoint"
@@ -90,7 +88,7 @@ func NewContourIngressRouteSource(
 	// Use shared informer to listen for add/update/delete of ingressroutes in the specified namespace.
 	// Set resync period to 0, to prevent processing when nothing has changed.
 	informerFactory := dynamicinformer.NewFilteredDynamicSharedInformerFactory(dynamicKubeClient, 0, namespace, nil)
-	ingressRouteInformer := informerFactory.ForResource(contourapi.IngressRouteGVR)
+	ingressRouteInformer := informerFactory.ForResource(contour.IngressRouteGVR)
 
 	// Add default resource event handlers to properly initialize informer.
 	ingressRouteInformer.Informer().AddEventHandler(
@@ -138,15 +136,15 @@ func (sc *ingressRouteSource) Endpoints(ctx context.Context) ([]*endpoint.Endpoi
 		return nil, err
 	}
 
-	// Convert to []*contourapi.IngressRoute
-	var ingressRoutes []*contourapi.IngressRoute
+	// Convert to []*contour.IngressRoute
+	var ingressRoutes []*contour.IngressRoute
 	for _, ir := range irs {
 		unstrucuredIR, ok := ir.(*unstructured.Unstructured)
 		if !ok {
 			return nil, errors.New("could not convert")
 		}
 
-		irConverted := &contourapi.IngressRoute{}
+		irConverted := &contour.IngressRoute{}
 		err := sc.unstructuredConverter.scheme.Convert(unstrucuredIR, irConverted, nil)
 		if err != nil {
 			return nil, err
@@ -209,7 +207,7 @@ func (sc *ingressRouteSource) Endpoints(ctx context.Context) ([]*endpoint.Endpoi
 	return endpoints, nil
 }
 
-func (sc *ingressRouteSource) endpointsFromTemplate(ctx context.Context, ingressRoute *contourapi.IngressRoute) ([]*endpoint.Endpoint, error) {
+func (sc *ingressRouteSource) endpointsFromTemplate(ctx context.Context, ingressRoute *contour.IngressRoute) ([]*endpoint.Endpoint, error) {
 	// Process the whole template string
 	var buf bytes.Buffer
 	err := sc.fqdnTemplate.Execute(&buf, ingressRoute)
@@ -246,7 +244,7 @@ func (sc *ingressRouteSource) endpointsFromTemplate(ctx context.Context, ingress
 }
 
 // filterByAnnotations filters a list of configs by a given annotation selector.
-func (sc *ingressRouteSource) filterByAnnotations(ingressRoutes []*contourapi.IngressRoute) ([]*contourapi.IngressRoute, error) {
+func (sc *ingressRouteSource) filterByAnnotations(ingressRoutes []*contour.IngressRoute) ([]*contour.IngressRoute, error) {
 	labelSelector, err := metav1.ParseToLabelSelector(sc.annotationFilter)
 	if err != nil {
 		return nil, err
@@ -261,7 +259,7 @@ func (sc *ingressRouteSource) filterByAnnotations(ingressRoutes []*contourapi.In
 		return ingressRoutes, nil
 	}
 
-	filteredList := []*contourapi.IngressRoute{}
+	filteredList := []*contour.IngressRoute{}
 
 	for _, ingressRoute := range ingressRoutes {
 		// convert the ingressroute's annotations to an equivalent label selector
@@ -276,7 +274,7 @@ func (sc *ingressRouteSource) filterByAnnotations(ingressRoutes []*contourapi.In
 	return filteredList, nil
 }
 
-func (sc *ingressRouteSource) setResourceLabel(ingressRoute *contourapi.IngressRoute, endpoints []*endpoint.Endpoint) {
+func (sc *ingressRouteSource) setResourceLabel(ingressRoute *contour.IngressRoute, endpoints []*endpoint.Endpoint) {
 	for _, ep := range endpoints {
 		ep.Labels[endpoint.ResourceLabelKey] = fmt.Sprintf("ingressroute/%s/%s", ingressRoute.Namespace, ingressRoute.Name)
 	}
@@ -304,7 +302,7 @@ func (sc *ingressRouteSource) targetsFromContourLoadBalancer(ctx context.Context
 }
 
 // endpointsFromIngressRouteConfig extracts the endpoints from a Contour IngressRoute object
-func (sc *ingressRouteSource) endpointsFromIngressRoute(ctx context.Context, ingressRoute *contourapi.IngressRoute) ([]*endpoint.Endpoint, error) {
+func (sc *ingressRouteSource) endpointsFromIngressRoute(ctx context.Context, ingressRoute *contour.IngressRoute) ([]*endpoint.Endpoint, error) {
 	if ingressRoute.CurrentStatus != "valid" {
 		log.Warn(errors.Errorf("cannot generate endpoints for ingressroute with status %s", ingressRoute.CurrentStatus))
 		return nil, nil
@@ -358,26 +356,3 @@ func parseContourLoadBalancerService(service string) (namespace, name string, er
 
 func (sc *ingressRouteSource) AddEventHandler(ctx context.Context, handler func()) {
 }
-
-// UnstructuredConverter handles conversions between unstructured.Unstructured and Contour types
-type UnstructuredConverter struct {
-	// scheme holds an initializer for converting Unstructured to a type
-	scheme *runtime.Scheme
-}
-
-// NewUnstructuredConverter returns a new UnstructuredConverter initialized
-func NewUnstructuredConverter() (*UnstructuredConverter, error) {
-	uc := &UnstructuredConverter{
-		scheme: runtime.NewScheme(),
-	}
-
-	// Setup converter to understand custom CRD types
-	contourapi.AddKnownTypes(uc.scheme)
-
-	// Add the core types we need
-	if err := scheme.AddToScheme(uc.scheme); err != nil {
-		return nil, err
-	}
-
-	return uc, nil
-}
source/ingressroute_test.go
@@ -80,7 +80,7 @@ func (suite *IngressRouteSuite) SetupTest() {
 	}).IngressRoute()
 
 	// Convert to unstructured
-	unstructuredIngressRoute, err := convertToUnstructured(suite.ingressRoute, s)
+	unstructuredIngressRoute, err := convertIngressRouteToUnstructured(suite.ingressRoute, s)
 	if err != nil {
 		suite.Error(err)
 	}
@@ -98,11 +98,12 @@ func (suite *IngressRouteSuite) TestResourceLabelIsSet() {
 
 func newDynamicKubernetesClient() (*fakeDynamic.FakeDynamicClient, *runtime.Scheme) {
 	s := runtime.NewScheme()
-	contour.AddKnownTypes(s)
+	_ = contour.AddToScheme(s)
+	_ = projectcontour.AddToScheme(s)
 	return fakeDynamic.NewSimpleDynamicClient(s), s
 }
 
-func convertToUnstructured(ir *contour.IngressRoute, s *runtime.Scheme) (*unstructured.Unstructured, error) {
+func convertIngressRouteToUnstructured(ir *contour.IngressRoute, s *runtime.Scheme) (*unstructured.Unstructured, error) {
 	unstructuredIngressRoute := &unstructured.Unstructured{}
 	if err := s.Convert(ir, unstructuredIngressRoute, context.Background()); err != nil {
 		return nil, err
@@ -1013,7 +1014,7 @@ func testIngressRouteEndpoints(t *testing.T) {
 
 	fakeDynamicClient, scheme := newDynamicKubernetesClient()
 	for _, ingressRoute := range ingressRoutes {
-		converted, err := convertToUnstructured(ingressRoute, scheme)
+		converted, err := convertIngressRouteToUnstructured(ingressRoute, scheme)
 		require.NoError(t, err)
 		_, err = fakeDynamicClient.Resource(contour.IngressRouteGVR).Namespace(ingressRoute.Namespace).Create(context.Background(), converted, metav1.CreateOptions{})
 		require.NoError(t, err)
source/store.go
@@ -221,6 +221,12 @@ func BuildWithConfig(source string, p ClientGenerator, cfg *Config) (Source, err
 			return nil, err
 		}
 		return NewContourIngressRouteSource(dynamicClient, kubernetesClient, cfg.ContourLoadBalancerService, cfg.Namespace, cfg.AnnotationFilter, cfg.FQDNTemplate, cfg.CombineFQDNAndAnnotation, cfg.IgnoreHostnameAnnotation)
+	case "contour-httpproxy":
+		dynamicClient, err := p.DynamicKubernetesClient()
+		if err != nil {
+			return nil, err
+		}
+		return NewContourHTTPProxySource(dynamicClient, cfg.Namespace, cfg.AnnotationFilter, cfg.FQDNTemplate, cfg.CombineFQDNAndAnnotation, cfg.IgnoreHostnameAnnotation)
 	case "openshift-route":
 		ocpClient, err := p.OpenShiftClient()
 		if err != nil {
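As a minimal in-package sketch, the new case is reached the same way the other sources are: by name, through BuildWithConfig (or ByNames, which calls it per entry). The p, cfg, and ctx values below are assumed to be whatever the controller already builds; this snippet is illustrative only.

// Sketch (package source): resolve the new source by name, the same path the
// --source=contour-httpproxy flag takes via ByNames -> BuildWithConfig.
src, err := BuildWithConfig("contour-httpproxy", p, cfg)
if err != nil {
	return err
}
hpEndpoints, err := src.Endpoints(ctx)
if err != nil {
	return err
}
log.Debugf("contour-httpproxy produced %d endpoints", len(hpEndpoints))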
source/store_test.go
@@ -96,9 +96,9 @@ func (suite *ByNamesTestSuite) TestAllInitialized() {
 	mockClientGenerator.On("IstioClient").Return(NewFakeConfigStore(), nil)
 	mockClientGenerator.On("DynamicKubernetesClient").Return(fakeDynamic, nil)
 
-	sources, err := ByNames(mockClientGenerator, []string{"service", "ingress", "istio-gateway", "contour-ingressroute", "fake"}, minimalConfig)
+	sources, err := ByNames(mockClientGenerator, []string{"service", "ingress", "istio-gateway", "contour-ingressroute", "contour-httpproxy", "fake"}, minimalConfig)
 	suite.NoError(err, "should not generate errors")
-	suite.Len(sources, 5, "should generate all five sources")
+	suite.Len(sources, 6, "should generate all six sources")
 }
 
 func (suite *ByNamesTestSuite) TestOnlyFake() {
@@ -148,6 +148,8 @@ func (suite *ByNamesTestSuite) TestIstioClientFails() {
 
 	_, err = ByNames(mockClientGenerator, []string{"contour-ingressroute"}, minimalConfig)
 	suite.Error(err, "should return an error if contour client cannot be created")
+	_, err = ByNames(mockClientGenerator, []string{"contour-httpproxy"}, minimalConfig)
+	suite.Error(err, "should return an error if contour client cannot be created")
 }
 
 func TestByNames(t *testing.T) {
source/unstructured_converter.go (new file, 32 lines)
@@ -0,0 +1,32 @@
package source

import (
	contour "github.com/projectcontour/contour/apis/contour/v1beta1"
	projectcontour "github.com/projectcontour/contour/apis/projectcontour/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes/scheme"
)

// UnstructuredConverter handles conversions between unstructured.Unstructured and Contour types
type UnstructuredConverter struct {
	// scheme holds an initializer for converting Unstructured to a type
	scheme *runtime.Scheme
}

// NewUnstructuredConverter returns a new UnstructuredConverter initialized
func NewUnstructuredConverter() (*UnstructuredConverter, error) {
	uc := &UnstructuredConverter{
		scheme: runtime.NewScheme(),
	}

	// Setup converter to understand custom CRD types
	_ = contour.AddToScheme(uc.scheme)
	_ = projectcontour.AddToScheme(uc.scheme)

	// Add the core types we need
	if err := scheme.AddToScheme(uc.scheme); err != nil {
		return nil, err
	}

	return uc, nil
}
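To illustrate how the shared converter is used from inside package source, here is a hypothetical helper (not part of this commit) that mirrors the conversion step in httpProxySource.Endpoints; it relies on the package's existing errors, unstructured, and projectcontour imports, and it must live in this package because the scheme field is unexported.

// toHTTPProxy is a hypothetical package-internal helper: it converts an object
// returned by the dynamic informer's lister into a typed HTTPProxy using the
// converter's scheme, exactly as Endpoints does above.
func toHTTPProxy(uc *UnstructuredConverter, obj interface{}) (*projectcontour.HTTPProxy, error) {
	unstructuredHP, ok := obj.(*unstructured.Unstructured)
	if !ok {
		return nil, errors.New("object is not *unstructured.Unstructured")
	}
	hp := &projectcontour.HTTPProxy{}
	if err := uc.scheme.Convert(unstructuredHP, hp, nil); err != nil {
		return nil, err
	}
	return hp, nil
}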