mirror of
https://github.com/traefik/traefik.git
synced 2025-10-13 08:31:15 +02:00
Add Knative provider
This commit is contained in:
parent
3f23afb2c6
commit
13bcdebc89
@ -5,18 +5,18 @@ on:
|
||||
branches:
|
||||
- '*'
|
||||
paths:
|
||||
- '.github/workflows/test-conformance.yaml'
|
||||
- '.github/workflows/test-gateway-api-conformance.yaml'
|
||||
- 'pkg/provider/kubernetes/gateway/**'
|
||||
- 'integration/fixtures/k8s-conformance/**'
|
||||
- 'integration/k8s_conformance_test.go'
|
||||
|
||||
env:
|
||||
GO_VERSION: '1.23'
|
||||
GO_VERSION: '1.24'
|
||||
CGO_ENABLED: 0
|
||||
|
||||
jobs:
|
||||
|
||||
test-conformance:
|
||||
test-gateway-api-conformance:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
@ -30,6 +30,10 @@ jobs:
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
|
||||
- name: Avoid generating webui
|
||||
run: |
|
||||
touch webui/static/index.html
|
||||
|
||||
- name: K8s Gateway API conformance test and report
|
||||
run: |
|
||||
make test-gateway-api-conformance
|
4
.github/workflows/test-integration.yaml
vendored
4
.github/workflows/test-integration.yaml
vendored
@ -30,6 +30,10 @@ jobs:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
check-latest: true
|
||||
|
||||
- name: Avoid generating webui
|
||||
run: |
|
||||
touch webui/static/index.html
|
||||
|
||||
- name: Build binary
|
||||
run: make binary-linux-amd64
|
||||
|
||||
|
50
.github/workflows/test-knative-conformance.yaml
vendored
Normal file
50
.github/workflows/test-knative-conformance.yaml
vendored
Normal file
@ -0,0 +1,50 @@
|
||||
name: Test Knative conformance
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
branches:
|
||||
- '*'
|
||||
paths:
|
||||
- '.github/workflows/test-knative-conformance.yaml'
|
||||
- 'pkg/provider/kubernetes/knative/**'
|
||||
- 'integration/fixtures/knative/**'
|
||||
- 'integration/knative_conformance_test.go'
|
||||
|
||||
env:
|
||||
GO_VERSION: '1.24'
|
||||
CGO_ENABLED: 0
|
||||
|
||||
jobs:
|
||||
|
||||
test-knative-conformance:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- name: Check out code
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Set up Go ${{ env.GO_VERSION }}
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
|
||||
- name: Set up KO
|
||||
uses: ko-build/setup-ko@v0.6
|
||||
env:
|
||||
KO_DOCKER_REPO: ko.local
|
||||
|
||||
- name: Upload Test Images
|
||||
run: |
|
||||
# Download the test image templates.
|
||||
go mod vendor
|
||||
./integration/fixtures/knative/upload-test-images.sh
|
||||
|
||||
- name: Avoid generating webui
|
||||
run: |
|
||||
touch webui/static/index.html
|
||||
|
||||
- name: Knative conformance test
|
||||
run: |
|
||||
make test-knative-conformance
|
7
Makefile
7
Makefile
@ -100,11 +100,16 @@ test-integration:
|
||||
GOOS=$(GOOS) GOARCH=$(GOARCH) go test ./integration -test.timeout=20m -failfast -v $(TESTFLAGS)
|
||||
|
||||
.PHONY: test-gateway-api-conformance
|
||||
#? test-gateway-api-conformance: Run the conformance tests
|
||||
#? test-gateway-api-conformance: Run the Gateway API conformance tests
|
||||
test-gateway-api-conformance: build-image-dirty
|
||||
# In case of a new Minor/Major version, the k8sConformanceTraefikVersion needs to be updated.
|
||||
GOOS=$(GOOS) GOARCH=$(GOARCH) go test ./integration -v -test.run K8sConformanceSuite -k8sConformance -k8sConformanceTraefikVersion="v3.5" $(TESTFLAGS)
|
||||
|
||||
.PHONY: test-knative-conformance
|
||||
#? test-knative-conformance: Run the Knative conformance tests
|
||||
test-knative-conformance: build-image-dirty
|
||||
GOOS=$(GOOS) GOARCH=$(GOARCH) go test ./integration/integration_test.go ./integration/knative_conformance_test.go -v -tags knativeConformance -test.run KnativeConformanceSuite
|
||||
|
||||
.PHONY: test-ui-unit
|
||||
#? test-ui-unit: Run the unit tests for the webui
|
||||
test-ui-unit:
|
||||
|
@ -0,0 +1,50 @@
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: knative-networking-role
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- services
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- secrets
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- networking.internal.knative.dev
|
||||
resources:
|
||||
- ingresses
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- networking.internal.knative.dev
|
||||
resources:
|
||||
- ingresses/status
|
||||
verbs:
|
||||
- update
|
||||
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: gateway-controller
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: knative-networking-role
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: traefik-controller
|
||||
namespace: default
|
@ -123,6 +123,7 @@ THIS FILE MUST NOT BE EDITED BY HAND
|
||||
| <a id="experimental-abortonpluginfailure" href="#experimental-abortonpluginfailure" title="#experimental-abortonpluginfailure">experimental.abortonpluginfailure</a> | Defines whether all plugins must be loaded successfully for Traefik to start. | false |
|
||||
| <a id="experimental-fastproxy" href="#experimental-fastproxy" title="#experimental-fastproxy">experimental.fastproxy</a> | Enables the FastProxy implementation. | false |
|
||||
| <a id="experimental-fastproxy-debug" href="#experimental-fastproxy-debug" title="#experimental-fastproxy-debug">experimental.fastproxy.debug</a> | Enable debug mode for the FastProxy implementation. | false |
|
||||
| <a id="experimental-knative" href="#experimental-knative" title="#experimental-knative">experimental.knative</a> | Allow the Knative provider usage. | false |
|
||||
| <a id="experimental-kubernetesgateway" href="#experimental-kubernetesgateway" title="#experimental-kubernetesgateway">experimental.kubernetesgateway</a> | (Deprecated) Allow the Kubernetes gateway api provider usage. | false |
|
||||
| <a id="experimental-kubernetesingressnginx" href="#experimental-kubernetesingressnginx" title="#experimental-kubernetesingressnginx">experimental.kubernetesingressnginx</a> | Allow the Kubernetes Ingress NGINX provider usage. | false |
|
||||
| <a id="experimental-localplugins-name" href="#experimental-localplugins-name" title="#experimental-localplugins-name">experimental.localplugins._name_</a> | Local plugins configuration. | false |
|
||||
@ -319,6 +320,21 @@ THIS FILE MUST NOT BE EDITED BY HAND
|
||||
| <a id="providers-http-tls-cert" href="#providers-http-tls-cert" title="#providers-http-tls-cert">providers.http.tls.cert</a> | TLS cert | |
|
||||
| <a id="providers-http-tls-insecureskipverify" href="#providers-http-tls-insecureskipverify" title="#providers-http-tls-insecureskipverify">providers.http.tls.insecureskipverify</a> | TLS insecure skip verify | false |
|
||||
| <a id="providers-http-tls-key" href="#providers-http-tls-key" title="#providers-http-tls-key">providers.http.tls.key</a> | TLS key | |
|
||||
| <a id="providers-knative" href="#providers-knative" title="#providers-knative">providers.knative</a> | Enables Knative provider. | false |
|
||||
| <a id="providers-knative-certauthfilepath" href="#providers-knative-certauthfilepath" title="#providers-knative-certauthfilepath">providers.knative.certauthfilepath</a> | Kubernetes certificate authority file path (not needed for in-cluster client). | |
|
||||
| <a id="providers-knative-endpoint" href="#providers-knative-endpoint" title="#providers-knative-endpoint">providers.knative.endpoint</a> | Kubernetes server endpoint (required for external cluster client). | |
|
||||
| <a id="providers-knative-labelselector" href="#providers-knative-labelselector" title="#providers-knative-labelselector">providers.knative.labelselector</a> | Kubernetes label selector to use. | |
|
||||
| <a id="providers-knative-namespaces" href="#providers-knative-namespaces" title="#providers-knative-namespaces">providers.knative.namespaces</a> | Kubernetes namespaces. | |
|
||||
| <a id="providers-knative-privateentrypoints" href="#providers-knative-privateentrypoints" title="#providers-knative-privateentrypoints">providers.knative.privateentrypoints</a> | Entrypoint names used to expose the Ingress privately. If empty local Ingresses are skipped. | |
|
||||
| <a id="providers-knative-privateservice" href="#providers-knative-privateservice" title="#providers-knative-privateservice">providers.knative.privateservice</a> | Kubernetes service used to expose the networking controller privately. | |
|
||||
| <a id="providers-knative-privateservice-name" href="#providers-knative-privateservice-name" title="#providers-knative-privateservice-name">providers.knative.privateservice.name</a> | Name of the Kubernetes service. | |
|
||||
| <a id="providers-knative-privateservice-namespace" href="#providers-knative-privateservice-namespace" title="#providers-knative-privateservice-namespace">providers.knative.privateservice.namespace</a> | Namespace of the Kubernetes service. | |
|
||||
| <a id="providers-knative-publicentrypoints" href="#providers-knative-publicentrypoints" title="#providers-knative-publicentrypoints">providers.knative.publicentrypoints</a> | Entrypoint names used to expose the Ingress publicly. If empty an Ingress is exposed on all entrypoints. | |
|
||||
| <a id="providers-knative-publicservice" href="#providers-knative-publicservice" title="#providers-knative-publicservice">providers.knative.publicservice</a> | Kubernetes service used to expose the networking controller publicly. | |
|
||||
| <a id="providers-knative-publicservice-name" href="#providers-knative-publicservice-name" title="#providers-knative-publicservice-name">providers.knative.publicservice.name</a> | Name of the Kubernetes service. | |
|
||||
| <a id="providers-knative-publicservice-namespace" href="#providers-knative-publicservice-namespace" title="#providers-knative-publicservice-namespace">providers.knative.publicservice.namespace</a> | Namespace of the Kubernetes service. | |
|
||||
| <a id="providers-knative-throttleduration" href="#providers-knative-throttleduration" title="#providers-knative-throttleduration">providers.knative.throttleduration</a> | Ingress refresh throttle duration | 0 |
|
||||
| <a id="providers-knative-token" href="#providers-knative-token" title="#providers-knative-token">providers.knative.token</a> | Kubernetes bearer token (not needed for in-cluster client). | |
|
||||
| <a id="providers-kubernetescrd" href="#providers-kubernetescrd" title="#providers-kubernetescrd">providers.kubernetescrd</a> | Enables Kubernetes CRD provider. | false |
|
||||
| <a id="providers-kubernetescrd-allowcrossnamespace" href="#providers-kubernetescrd-allowcrossnamespace" title="#providers-kubernetescrd-allowcrossnamespace">providers.kubernetescrd.allowcrossnamespace</a> | Allow cross namespace resource reference. | false |
|
||||
| <a id="providers-kubernetescrd-allowemptyservices" href="#providers-kubernetescrd-allowemptyservices" title="#providers-kubernetescrd-allowemptyservices">providers.kubernetescrd.allowemptyservices</a> | Allow the creation of services without endpoints. | false |
|
||||
|
@ -0,0 +1,142 @@
|
||||
---
|
||||
title: "Traefik Knative Documentation"
|
||||
description: "Learn how to use the Knative as a provider for configuration discovery in Traefik Proxy. Read the technical documentation."
|
||||
---
|
||||
|
||||
# Traefik & Knative
|
||||
|
||||
The Traefik Knative provider integrates with Knative Serving to provide advanced traffic management and routing capabilities for serverless applications.
|
||||
|
||||
[Knative](https://knative.dev) is a Kubernetes-based platform that enables serverless workloads with features like scale-to-zero,
|
||||
automatic scaling, and revision management.
|
||||
|
||||
The provider watches Knative `Ingress` resources and automatically configures Traefik routing rules,
|
||||
enabling seamless integration between Traefik's networking capabilities and Knative's serverless platform.
|
||||
|
||||
## Requirements
|
||||
|
||||
{!kubernetes-requirements.md!}
|
||||
|
||||
1. Install/update the Knative CRDs.
|
||||
|
||||
```bash
|
||||
kubectl apply -f https://github.com/knative/serving/releases/download/knative-v1.19.0/serving-crds.yaml
|
||||
```
|
||||
|
||||
2. Install the Knative Serving core components.
|
||||
|
||||
```bash
|
||||
kubectl apply -f https://github.com/knative/serving/releases/download/knative-v1.19.0/serving-core.yaml
|
||||
```
|
||||
|
||||
3. Update the config-network configuration to use the Traefik ingress class.
|
||||
|
||||
```bash
|
||||
kubectl patch configmap/config-network \
|
||||
-n knative-serving \
|
||||
--type merge \
|
||||
-p '{"data":{"ingress.class":"traefik.ingress.networking.knative.dev"}}'
|
||||
```
|
||||
|
||||
4. Add a custom domain to your Knative configuration (Optional).
|
||||
|
||||
```bash
|
||||
kubectl patch configmap config-domain \
|
||||
-n knative-serving \
|
||||
--type='merge' \
|
||||
-p='{"data":{"example.com":""}}'
|
||||
```
|
||||
|
||||
5. Install/update the Traefik [RBAC](../../../dynamic-configuration/kubernetes-knative-rbac.yml).
|
||||
|
||||
```bash
|
||||
kubectl apply -f https://raw.githubusercontent.com/traefik/traefik/v3.6/docs/content/reference/dynamic-configuration/kubernetes-knative-rbac.yml
|
||||
```
|
||||
|
||||
## Configuration Example
|
||||
|
||||
As this provider is an experimental feature, it needs to be enabled in the experimental and in the provider sections of the configuration.
|
||||
You can enable the Knative provider as detailed below:
|
||||
|
||||
```yaml tab="File (YAML)"
|
||||
experimental:
|
||||
knative: true
|
||||
|
||||
providers:
|
||||
knative: {}
|
||||
```
|
||||
|
||||
```toml tab="File (TOML)"
|
||||
[experimental.knative]
|
||||
|
||||
[providers.knative]
|
||||
```
|
||||
|
||||
```bash tab="CLI"
|
||||
--experimental.knative=true
|
||||
--providers.knative=true
|
||||
```
|
||||
|
||||
The Knative provider uses the Knative API to retrieve its routing configuration.
|
||||
The provider then watches for incoming Knative events and derives the corresponding dynamic configuration from it.
|
||||
|
||||
## Configuration Options
|
||||
|
||||
<!-- markdownlint-disable MD013 -->
|
||||
|
||||
| Field | Description | Default | Required |
|
||||
|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:--------|:---------|
|
||||
| <a id="providers-providersThrottleDuration" href="#providers-providersThrottleDuration" title="#providers-providersThrottleDuration">`providers.providersThrottleDuration`</a> | Minimum amount of time to wait for, after a configuration reload, before taking into account any new configuration refresh event.<br />If multiple events occur within this time, only the most recent one is taken into account, and all others are discarded.<br />**This option cannot be set per provider, but the throttling algorithm applies to each of them independently.** | 2s | No |
|
||||
| <a id="providers-knative-endpoint" href="#providers-knative-endpoint" title="#providers-knative-endpoint">providers.knative.endpoint</a> | Server endpoint URL.<br />More information [here](#endpoint). | |
|
||||
| <a id="providers-knative-token" href="#providers-knative-token" title="#providers-knative-token">providers.knative.token</a> | Bearer token used for the Kubernetes client configuration. | |
|
||||
| <a id="providers-knative-certauthfilepath" href="#providers-knative-certauthfilepath" title="#providers-knative-certauthfilepath">providers.knative.certauthfilepath</a> | Path to the certificate authority file.<br />Used for the Kubernetes client configuration. | |
|
||||
| <a id="providers-knative-namespaces" href="#providers-knative-namespaces" title="#providers-knative-namespaces">providers.knative.namespaces</a> | Array of namespaces to watch.<br />If left empty, watch all namespaces. | |
|
||||
| <a id="providers-knative-labelselector" href="#providers-knative-labelselector" title="#providers-knative-labelselector">providers.knative.labelselector</a> | Allow filtering Knative Ingress objects using label selectors. | |
|
||||
| <a id="providers-knative-throttleduration" href="#providers-knative-throttleduration" title="#providers-knative-throttleduration">providers.knative.throttleduration</a> | Minimum amount of time to wait between two Kubernetes events before producing a new configuration.<br />This prevents a Kubernetes cluster that updates many times per second from continuously changing your Traefik configuration.<br />If empty, every event is caught. | 0 |
|
||||
| <a id="providers-knative-privateentrypoints" href="#providers-knative-privateentrypoints" title="#providers-knative-privateentrypoints">providers.knative.privateentrypoints</a> | Entrypoint names used to expose the Ingress privately. If empty local Ingresses are skipped. | |
|
||||
| <a id="providers-knative-privateservice" href="#providers-knative-privateservice" title="#providers-knative-privateservice">providers.knative.privateservice</a> | Kubernetes service used to expose the networking controller privately. | |
|
||||
| <a id="providers-knative-privateservice-name" href="#providers-knative-privateservice-name" title="#providers-knative-privateservice-name">providers.knative.privateservice.name</a> | Name of the private Kubernetes service. | |
|
||||
| <a id="providers-knative-privateservice-namespace" href="#providers-knative-privateservice-namespace" title="#providers-knative-privateservice-namespace">providers.knative.privateservice.namespace</a> | Namespace of the private Kubernetes service. | |
|
||||
| <a id="providers-knative-publicentrypoints" href="#providers-knative-publicentrypoints" title="#providers-knative-publicentrypoints">providers.knative.publicentrypoints</a> | Entrypoint names used to expose the Ingress publicly. If empty an Ingress is exposed on all entrypoints. | |
|
||||
| <a id="providers-knative-publicservice" href="#providers-knative-publicservice" title="#providers-knative-publicservice">providers.knative.publicservice</a> | Kubernetes service used to expose the networking controller publicly. | |
|
||||
| <a id="providers-knative-publicservice-name" href="#providers-knative-publicservice-name" title="#providers-knative-publicservice-name">providers.knative.publicservice.name</a> | Name of the public Kubernetes service. | |
|
||||
| <a id="providers-knative-publicservice-namespace" href="#providers-knative-publicservice-namespace" title="#providers-knative-publicservice-namespace">providers.knative.publicservice.namespace</a> | Namespace of the public Kubernetes service. | |
|
||||
|
||||
<!-- markdownlint-enable MD013 -->
|
||||
|
||||
### `endpoint`
|
||||
|
||||
The Kubernetes server endpoint URL.
|
||||
|
||||
When deployed into Kubernetes, Traefik reads the environment variables `KUBERNETES_SERVICE_HOST` and `KUBERNETES_SERVICE_PORT` or `KUBECONFIG` to construct the endpoint.
|
||||
|
||||
The access token is looked up in `/var/run/secrets/kubernetes.io/serviceaccount/token` and the SSL CA certificate in `/var/run/secrets/kubernetes.io/serviceaccount/ca.crt`.
|
||||
Both are mounted automatically when deployed inside Kubernetes.
|
||||
|
||||
The endpoint may be specified to override the environment variable values inside a cluster.
|
||||
|
||||
When the environment variables are not found, Traefik tries to connect to the Knative API server with an external-cluster client.
|
||||
In this case, the endpoint is required.
|
||||
Specifically, it may be set to the URL used by `kubectl proxy` to connect to a Knative cluster using the granted authentication and authorization of the associated kubeconfig.
|
||||
|
||||
```yaml tab="File (YAML)"
|
||||
providers:
|
||||
knative:
|
||||
endpoint: "http://localhost:8080"
|
||||
# ...
|
||||
```
|
||||
|
||||
```toml tab="File (TOML)"
|
||||
[providers.knative]
|
||||
endpoint = "http://localhost:8080"
|
||||
# ...
|
||||
```
|
||||
|
||||
```bash tab="CLI"
|
||||
--providers.knative.endpoint=http://localhost:8080
|
||||
```
|
||||
## Routing Configuration
|
||||
|
||||
See the dedicated section in [routing](../../../routing-configuration/kubernetes/knative.md).
|
||||
|
||||
{!traefik-for-business-applications.md!}
|
@ -0,0 +1,96 @@
|
||||
---
|
||||
title: "Traefik Knative Documentation"
|
||||
description: "The Knative provider can be used for routing and load balancing in Traefik Proxy. View examples in the technical documentation."
|
||||
---
|
||||
|
||||
# Traefik & Knative
|
||||
|
||||
When using the Knative provider, Traefik leverages Knative's Custom Resource Definitions (CRDs) to obtain its routing configuration.
|
||||
For detailed information on Knative concepts and resources, refer to the official [documentation](https://knative.dev/docs/).
|
||||
|
||||
The Knative provider supports version [v1.19.0](https://github.com/knative/serving/releases/tag/knative-v1.19.0) of the specification.
|
||||
|
||||
## Deploying a Knative Service
|
||||
|
||||
A `Service` is a core resource in the Knative specification that defines the entry point for traffic into a Knative application.
|
||||
It is linked to a `Ingress`, which specifies the Knative networking controller responsible for managing and handling the traffic,
|
||||
ensuring that it is directed to the appropriate Knative backend services.
|
||||
|
||||
The following `Service` manifest configures the running Traefik controller to handle the incoming traffic.
|
||||
|
||||
```yaml
|
||||
---
|
||||
apiVersion: serving.knative.dev/v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: helloworld-go
|
||||
namespace: default
|
||||
spec:
|
||||
template:
|
||||
spec:
|
||||
containers:
|
||||
- image: gcr.io/knative-samples/helloworld-go
|
||||
env:
|
||||
- name: TARGET
|
||||
value: "Go Sample v1"
|
||||
```
|
||||
|
||||
Once everything is deployed, sending a `GET` request to the HTTP endpoint should return the following response:
|
||||
|
||||
```shell
|
||||
$ curl http://helloworld-go.default.example.com
|
||||
|
||||
Hello Go Sample v1!
|
||||
```
|
||||
|
||||
!!! Note
|
||||
|
||||
The `example.com` domain is the public domain configured when deploying the Traefik controller.
|
||||
Check out [the install configuration](../../install-configuration/providers/kubernetes/knative.md) for more details.
|
||||
|
||||
### Tag based routing
|
||||
|
||||
To add tag-based routing with percentage in Knative, you can define the `traffic` section in your `Service` manifest to include different revisions with specific tags and percentages.
|
||||
Here is an example:
|
||||
|
||||
```yaml
|
||||
apiVersion: serving.knative.dev/v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: helloworld-go
|
||||
namespace: default
|
||||
spec:
|
||||
template:
|
||||
spec:
|
||||
containers:
|
||||
- image: gcr.io/knative-samples/helloworld-go
|
||||
env:
|
||||
- name: TARGET
|
||||
value: "Go Sample v2"
|
||||
traffic:
|
||||
- tag: v1
|
||||
revisionName: helloworld-go-00001
|
||||
percent: 50
|
||||
- tag: v2
|
||||
revisionName: helloworld-go-00002
|
||||
percent: 50
|
||||
```
|
||||
|
||||
In this example:
|
||||
- The `traffic` section specifies two revisions (`helloworld-go-00001` and `helloworld-go-00002`) with tags `v1` and `v2`, each receiving 50% of the traffic.
|
||||
- The `tag` field allows you to route traffic to specific revisions using the tag.
|
||||
|
||||
You can access the tagged revisions using these URLs:
|
||||
|
||||
- `http://v1-helloworld-go.default.example.com`
|
||||
- `http://v2-helloworld-go.default.example.com`
|
||||
|
||||
Use the default URL to access percentage-based routing:
|
||||
|
||||
- `http://helloworld-go.default.example.com`
|
||||
|
||||
### HTTP/HTTPS
|
||||
|
||||
Check out the Knative documentation for [HTTP/HTTPS configuration](https://knative.dev/docs/serving/encryption/external-domain-tls/#configure-external-domain-encryption).
|
||||
|
||||
{!traefik-for-business-applications.md!}
|
@ -228,6 +228,7 @@ nav:
|
||||
- 'Kubernetes CRD' : 'reference/install-configuration/providers/kubernetes/kubernetes-crd.md'
|
||||
- 'Kubernetes Ingress' : 'reference/install-configuration/providers/kubernetes/kubernetes-ingress.md'
|
||||
- 'Kubernetes Ingress NGINX' : 'reference/install-configuration/providers/kubernetes/kubernetes-ingress-nginx.md'
|
||||
- 'Knative': 'reference/install-configuration/providers/kubernetes/knative.md'
|
||||
- 'Docker': 'reference/install-configuration/providers/docker.md'
|
||||
- 'Swarm': 'reference/install-configuration/providers/swarm.md'
|
||||
- 'Hashicorp':
|
||||
@ -345,6 +346,7 @@ nav:
|
||||
- 'IngressRouteUDP' : 'reference/routing-configuration/kubernetes/crd/udp/ingressrouteudp.md'
|
||||
- 'Ingress' : 'reference/routing-configuration/kubernetes/ingress.md'
|
||||
- 'Ingress NGINX' : 'reference/routing-configuration/kubernetes/ingress-nginx.md'
|
||||
- 'Knative': 'reference/routing-configuration/kubernetes/knative.md'
|
||||
- 'Label & Tag Providers' :
|
||||
- 'Docker' : 'reference/routing-configuration/other-providers/docker.md'
|
||||
- 'Swarm' : 'reference/routing-configuration/other-providers/swarm.md'
|
||||
|
12
go.mod
12
go.mod
@ -63,7 +63,7 @@ require (
|
||||
github.com/stealthrocket/wasi-go v0.8.0
|
||||
github.com/stealthrocket/wazergo v0.19.1
|
||||
github.com/stretchr/testify v1.11.1
|
||||
github.com/stvp/go-udp-testing v0.0.0-20191102171040-06b61409b154 // No tag on the repo.
|
||||
github.com/stvp/go-udp-testing v0.0.0-20201019212854-469649b16807 // No tag on the repo.
|
||||
github.com/tailscale/tscert v0.0.0-20230806124524-28a91b69a046 // No tag on the repo.
|
||||
github.com/testcontainers/testcontainers-go v0.32.0
|
||||
github.com/testcontainers/testcontainers-go/modules/k3s v0.32.0
|
||||
@ -111,6 +111,8 @@ require (
|
||||
k8s.io/apimachinery v0.32.3
|
||||
k8s.io/client-go v0.32.3
|
||||
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // No tag on the repo.
|
||||
knative.dev/networking v0.0.0-20241022012959-60e29ff520dc
|
||||
knative.dev/pkg v0.0.0-20241021183759-9b9d535af5ad
|
||||
mvdan.cc/xurls/v2 v2.5.0
|
||||
sigs.k8s.io/controller-runtime v0.20.4
|
||||
sigs.k8s.io/gateway-api v1.3.0
|
||||
@ -171,6 +173,7 @@ require (
|
||||
github.com/baidubce/bce-sdk-go v0.9.243 // indirect
|
||||
github.com/benbjohnson/clock v1.3.5 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/blendle/zapdriver v1.3.1 // indirect
|
||||
github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc // indirect
|
||||
github.com/bytedance/sonic v1.10.0 // indirect
|
||||
github.com/cenkalti/backoff/v5 v5.0.3 // indirect
|
||||
@ -191,7 +194,7 @@ require (
|
||||
github.com/distribution/reference v0.6.0 // indirect
|
||||
github.com/dnsimple/dnsimple-go/v4 v4.0.0 // indirect
|
||||
github.com/docker/go-units v0.5.0 // indirect
|
||||
github.com/emicklei/go-restful/v3 v3.12.0 // indirect
|
||||
github.com/emicklei/go-restful/v3 v3.12.1 // indirect
|
||||
github.com/evanphx/json-patch/v5 v5.9.11 // indirect
|
||||
github.com/exoscale/egoscale/v3 v3.1.26 // indirect
|
||||
github.com/fatih/color v1.18.0 // indirect
|
||||
@ -225,6 +228,7 @@ require (
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang-jwt/jwt/v4 v4.5.2 // indirect
|
||||
github.com/golang-jwt/jwt/v5 v5.3.0 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
github.com/google/gnostic-models v0.6.8 // indirect
|
||||
github.com/google/go-cmp v0.7.0 // indirect
|
||||
github.com/google/go-querystring v1.1.0 // indirect
|
||||
@ -309,6 +313,7 @@ require (
|
||||
github.com/onsi/ginkgo v1.16.5 // indirect
|
||||
github.com/opencontainers/go-digest v1.0.0 // indirect
|
||||
github.com/opencontainers/image-spec v1.1.1 // indirect
|
||||
github.com/openzipkin/zipkin-go v0.4.3 // indirect
|
||||
github.com/ovh/go-ovh v1.9.0 // indirect
|
||||
github.com/pelletier/go-toml/v2 v2.2.4 // indirect
|
||||
github.com/peterhellberg/link v1.2.0 // indirect
|
||||
@ -321,6 +326,7 @@ require (
|
||||
github.com/quic-go/qpack v0.5.1 // indirect
|
||||
github.com/regfish/regfish-dnsapi-go v0.1.1 // indirect
|
||||
github.com/rs/cors v1.7.0 // indirect
|
||||
github.com/rs/dnscache v0.0.0-20230804202142-fc85eb664529 // indirect
|
||||
github.com/sacloud/api-client-go v0.3.3 // indirect
|
||||
github.com/sacloud/go-http v0.1.9 // indirect
|
||||
github.com/sacloud/iaas-api-go v1.17.1 // indirect
|
||||
@ -367,6 +373,7 @@ require (
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.16 // indirect
|
||||
go.etcd.io/etcd/client/v3 v3.5.16 // indirect
|
||||
go.mongodb.org/mongo-driver v1.13.1 // indirect
|
||||
go.opencensus.io v0.24.0 // indirect
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
|
||||
go.opentelemetry.io/collector/featuregate v1.41.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect
|
||||
@ -385,6 +392,7 @@ require (
|
||||
golang.org/x/oauth2 v0.31.0 // indirect
|
||||
golang.org/x/term v0.35.0 // indirect
|
||||
golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect
|
||||
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
|
||||
google.golang.org/api v0.249.0 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 // indirect
|
||||
|
35
go.sum
35
go.sum
@ -37,6 +37,10 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo
|
||||
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
|
||||
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
|
||||
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
|
||||
contrib.go.opencensus.io/exporter/ocagent v0.7.1-0.20200907061046-05415f1de66d h1:LblfooH1lKOpp1hIhukktmSAxFkqMPFk9KR6iZ0MJNI=
|
||||
contrib.go.opencensus.io/exporter/ocagent v0.7.1-0.20200907061046-05415f1de66d/go.mod h1:IshRmMJBhDfFj5Y67nVhMYTTIze91RUeT73ipWKs/GY=
|
||||
contrib.go.opencensus.io/exporter/prometheus v0.4.2 h1:sqfsYl5GIY/L570iT+l93ehxaWJs2/OwXtiWwew3oAg=
|
||||
contrib.go.opencensus.io/exporter/prometheus v0.4.2/go.mod h1:dvEHbiKmgvbr5pjaF9fpw1KeYcjrnC1J8B+JKjsZyRQ=
|
||||
dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s=
|
||||
dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
|
||||
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||
@ -248,6 +252,8 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
|
||||
github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
|
||||
github.com/blendle/zapdriver v1.3.1 h1:C3dydBOWYRiOk+B8X9IVZ5IOe+7cl+tGOexN4QqHfpE=
|
||||
github.com/blendle/zapdriver v1.3.1/go.mod h1:mdXfREi6u5MArG4j9fewC+FGnXaBR+T4Ox4J2u4eHCc=
|
||||
github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc h1:biVzkmvwrH8WK8raXaxBx6fRVTlJILwEwQGL1I/ByEI=
|
||||
github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
|
||||
github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs=
|
||||
@ -267,6 +273,8 @@ github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyY
|
||||
github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM=
|
||||
github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g=
|
||||
github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw=
|
||||
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
|
||||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
@ -363,8 +371,8 @@ github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFP
|
||||
github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
|
||||
github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385 h1:clC1lXBpe2kTj2VHdaIu9ajZQe4kcEY9j0NsnDDBZ3o=
|
||||
github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZifjYj7uP3BG/gKcuzL9xWVV/Y+cK33KM=
|
||||
github.com/emicklei/go-restful/v3 v3.12.0 h1:y2DdzBAURM29NFF94q6RaY4vjIH1rtwDapwQtU84iWk=
|
||||
github.com/emicklei/go-restful/v3 v3.12.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
|
||||
github.com/emicklei/go-restful/v3 v3.12.1 h1:PJMDIM/ak7btuL8Ex0iYET9hxM3CI2sjZtzpL63nKAU=
|
||||
github.com/emicklei/go-restful/v3 v3.12.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
|
||||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
|
||||
@ -372,6 +380,8 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.m
|
||||
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
|
||||
github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lShz4oaXpDTX2bLe7ls=
|
||||
github.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
|
||||
github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU=
|
||||
github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM=
|
||||
github.com/exoscale/egoscale/v3 v3.1.26 h1:bXXT0zVLbE4QFm6tmt0bg6ZPk9pQgUA3Z8SJrctQ7b0=
|
||||
@ -482,8 +492,8 @@ github.com/go-playground/validator/v10 v10.23.0/go.mod h1:dbuPbCMFw/DrkbEynArYaC
|
||||
github.com/go-resty/resty/v2 v2.16.5 h1:hBKqmWrr7uRc3euHVqmh1HTHcKn99Smr7o5spptdhTM=
|
||||
github.com/go-resty/resty/v2 v2.16.5/go.mod h1:hkJtXbA2iKHzJheXYvQ8snQES5ZLGKMwQ07xAwp/fiA=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I=
|
||||
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
|
||||
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
|
||||
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
|
||||
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
|
||||
github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs=
|
||||
@ -526,6 +536,7 @@ github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4er
|
||||
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
@ -1024,6 +1035,8 @@ github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJw
|
||||
github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M=
|
||||
github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
|
||||
github.com/openzipkin/zipkin-go v0.2.5/go.mod h1:KpXfKdgRDnnhsxw4pNIH9Md5lyFqKUa4YDFlwRYAMyE=
|
||||
github.com/openzipkin/zipkin-go v0.4.3 h1:9EGwpqkgnwdEIJ+Od7QVSEIH+ocmm5nPat0G7sjsSdg=
|
||||
github.com/openzipkin/zipkin-go v0.4.3/go.mod h1:M9wCJZFWCo2RiY+o1eBCEMe0Dp2S5LDHcMZmk3RmK7c=
|
||||
github.com/ovh/go-ovh v1.9.0 h1:6K8VoL3BYjVV3In9tPJUdT7qMx9h0GExN9EXx1r2kKE=
|
||||
github.com/ovh/go-ovh v1.9.0/go.mod h1:cTVDnl94z4tl8pP1uZ/8jlVxntjSIf09bNcQ5TJSC7c=
|
||||
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
|
||||
@ -1097,6 +1110,8 @@ github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1
|
||||
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
||||
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
|
||||
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
|
||||
github.com/prometheus/statsd_exporter v0.22.7 h1:7Pji/i2GuhK6Lu7DHrtTkFmNBCudCPT1pX2CziuyQR0=
|
||||
github.com/prometheus/statsd_exporter v0.22.7/go.mod h1:N/TevpjkIh9ccs6nuzY3jQn9dFqnUakOjnEuMPJJJnI=
|
||||
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
|
||||
github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI=
|
||||
github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg=
|
||||
@ -1117,6 +1132,8 @@ github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR
|
||||
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
|
||||
github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
|
||||
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
|
||||
github.com/rs/dnscache v0.0.0-20230804202142-fc85eb664529 h1:18kd+8ZUlt/ARXhljq+14TwAoKa61q6dX8jtwOf6DH8=
|
||||
github.com/rs/dnscache v0.0.0-20230804202142-fc85eb664529/go.mod h1:qe5TWALJ8/a1Lqznoc5BDHpYX/8HU60Hm2AwRmqzxqA=
|
||||
github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
|
||||
github.com/rs/zerolog v1.33.0 h1:1cU2KZkvPxNyfgEmhHAz/1A9Bz+llsdYzklWFzgp0r8=
|
||||
github.com/rs/zerolog v1.33.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss=
|
||||
@ -1231,8 +1248,8 @@ github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXl
|
||||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
|
||||
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
|
||||
github.com/stvp/go-udp-testing v0.0.0-20191102171040-06b61409b154 h1:XGopsea1Dw7ecQ8JscCNQXDGYAKDiWjDeXnpN/+BY9g=
|
||||
github.com/stvp/go-udp-testing v0.0.0-20191102171040-06b61409b154/go.mod h1:7jxmlfBCDBXRzr0eAQJ48XC1hBu1np4CS5+cHEYfwpc=
|
||||
github.com/stvp/go-udp-testing v0.0.0-20201019212854-469649b16807 h1:LUsDduamlucuNnWcaTbXQ6aLILFcLXADpOzeEH3U+OI=
|
||||
github.com/stvp/go-udp-testing v0.0.0-20201019212854-469649b16807/go.mod h1:7jxmlfBCDBXRzr0eAQJ48XC1hBu1np4CS5+cHEYfwpc=
|
||||
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
|
||||
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
|
||||
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
|
||||
@ -1357,6 +1374,8 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
|
||||
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
|
||||
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
|
||||
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
|
||||
go.opentelemetry.io/collector/featuregate v1.41.0 h1:CL4UMsMQj35nMJC3/jUu8VvYB4MHirbAX4B0Z/fCVLY=
|
||||
@ -1837,6 +1856,8 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU=
|
||||
golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90=
|
||||
gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw=
|
||||
gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY=
|
||||
gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
|
||||
gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0=
|
||||
gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
|
||||
@ -2005,6 +2026,10 @@ k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJ
|
||||
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4=
|
||||
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro=
|
||||
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
knative.dev/networking v0.0.0-20241022012959-60e29ff520dc h1:0d9XXRLlyuHfINZLlYqo/BYe/+chqqNBMLKJldjTbtw=
|
||||
knative.dev/networking v0.0.0-20241022012959-60e29ff520dc/go.mod h1:G56j6VCLzfaN9yZ4IqfNyN4c3U1czvhUmKeZX4UjQ8Q=
|
||||
knative.dev/pkg v0.0.0-20241021183759-9b9d535af5ad h1:Nrjtr2H168rJeamH4QdyLMV1lEKHejNhaj1ymgQMfLk=
|
||||
knative.dev/pkg v0.0.0-20241021183759-9b9d535af5ad/go.mod h1:StJI72GWcm/iErmk4RqFJiOo8RLbVqPbHxUqeVwAzeo=
|
||||
mvdan.cc/xurls/v2 v2.5.0 h1:lyBNOm8Wo71UknhUs4QTFUNNMyxy2JEIaKKo0RWOh+8=
|
||||
mvdan.cc/xurls/v2 v2.5.0/go.mod h1:yQgaGQ1rFtJUzkmKiHYSSfuQxqfYmd//X6PxvholpeE=
|
||||
nhooyr.io/websocket v1.8.7 h1:usjR2uOr/zjjkVMy0lW+PPohFok7PCow5sDjLgX4P4g=
|
||||
|
6692
integration/fixtures/knative/00-knative-crd-v1.19.0.yml
Normal file
6692
integration/fixtures/knative/00-knative-crd-v1.19.0.yml
Normal file
File diff suppressed because it is too large
Load Diff
50
integration/fixtures/knative/01-rbac.yml
Normal file
50
integration/fixtures/knative/01-rbac.yml
Normal file
@ -0,0 +1,50 @@
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: knative-networking-role
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- services
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- secrets
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- networking.internal.knative.dev
|
||||
resources:
|
||||
- ingresses
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- networking.internal.knative.dev
|
||||
resources:
|
||||
- ingresses/status
|
||||
verbs:
|
||||
- update
|
||||
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: traefik
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: knative-networking-role
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: traefik
|
||||
namespace: traefik
|
102
integration/fixtures/knative/02-traefik.yml
Normal file
102
integration/fixtures/knative/02-traefik.yml
Normal file
@ -0,0 +1,102 @@
|
||||
---
|
||||
kind: Namespace
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: traefik
|
||||
|
||||
---
|
||||
kind: ServiceAccount
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: traefik
|
||||
namespace: traefik
|
||||
|
||||
---
|
||||
kind: Deployment
|
||||
apiVersion: apps/v1
|
||||
metadata:
|
||||
name: traefik
|
||||
namespace: traefik
|
||||
labels:
|
||||
app: traefik
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: traefik
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: traefik
|
||||
spec:
|
||||
serviceAccountName: traefik
|
||||
containers:
|
||||
- name: traefik
|
||||
image: traefik/traefik:latest
|
||||
imagePullPolicy: Never
|
||||
args:
|
||||
- --api.insecure
|
||||
- --log.level=debug
|
||||
- --entrypoints.pweb.address=:80
|
||||
- --entrypoints.pwebsecure.address=:443
|
||||
- --entrypoints.privweb.address=:8080
|
||||
- --entrypoints.privwebsecure.address=:4443
|
||||
- --entrypoints.traefik.address=:9000
|
||||
- --experimental.knative
|
||||
- --providers.knative.publicEntrypoints=pweb,pwebsecure
|
||||
- --providers.knative.publicService.namespace=traefik
|
||||
- --providers.knative.publicService.name=traefik
|
||||
- --providers.knative.privateEntrypoints=privweb,privwebsecure
|
||||
- --providers.knative.privateService.namespace=traefik
|
||||
- --providers.knative.privateService.name=privtraefik
|
||||
- --providers.knative.throttleduration=2s
|
||||
|
||||
ports:
|
||||
- name: pweb
|
||||
containerPort: 80
|
||||
- name: pwebsecure
|
||||
containerPort: 443
|
||||
- name: privweb
|
||||
containerPort: 8080
|
||||
- name: privwebsecure
|
||||
containerPort: 4443
|
||||
- name: traefik
|
||||
containerPort: 9000
|
||||
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: traefik
|
||||
namespace: traefik
|
||||
spec:
|
||||
type: LoadBalancer
|
||||
selector:
|
||||
app: traefik
|
||||
ports:
|
||||
- port: 80
|
||||
name: web
|
||||
targetPort: pweb
|
||||
- port: 443
|
||||
name: websecure
|
||||
targetPort: pwebsecure
|
||||
- port: 9000
|
||||
name: traefik
|
||||
targetPort: traefik
|
||||
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: privtraefik
|
||||
namespace: traefik
|
||||
spec:
|
||||
selector:
|
||||
app: traefik
|
||||
ports:
|
||||
- port: 80
|
||||
name: web
|
||||
targetPort: privweb
|
||||
- port: 443
|
||||
name: websecure
|
||||
targetPort: privwebsecure
|
9513
integration/fixtures/knative/03-knative-serving-v1.19.0.yaml
Normal file
9513
integration/fixtures/knative/03-knative-serving-v1.19.0.yaml
Normal file
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,4 @@
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: serving-tests
|
14
integration/fixtures/knative/tools.go
Normal file
14
integration/fixtures/knative/tools.go
Normal file
@ -0,0 +1,14 @@
|
||||
//go:build tools
|
||||
|
||||
package tools
|
||||
|
||||
// The following dependencies are required by the Knative conformance tests.
|
||||
// They allow to download the test_images when calling "go mod vendor".
|
||||
import (
|
||||
_ "knative.dev/networking/test/test_images/grpc-ping"
|
||||
_ "knative.dev/networking/test/test_images/httpproxy"
|
||||
_ "knative.dev/networking/test/test_images/retry"
|
||||
_ "knative.dev/networking/test/test_images/runtime"
|
||||
_ "knative.dev/networking/test/test_images/timeout"
|
||||
_ "knative.dev/networking/test/test_images/wsserver"
|
||||
)
|
41
integration/fixtures/knative/upload-test-images.sh
Executable file
41
integration/fixtures/knative/upload-test-images.sh
Executable file
@ -0,0 +1,41 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright 2020 The Knative Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -o errexit
|
||||
|
||||
function upload_test_images() {
|
||||
echo ">> Publishing test images"
|
||||
(
|
||||
# Script needs to be executed from repo root
|
||||
cd "$( dirname "$0")/../../../"
|
||||
echo "Current working directory: $(pwd)"
|
||||
local image_dir="vendor/knative.dev/networking/test/test_images"
|
||||
local docker_tag=$1
|
||||
local tag_option=""
|
||||
if [ -n "${docker_tag}" ]; then
|
||||
tag_option="--tags $docker_tag,latest"
|
||||
fi
|
||||
|
||||
# ko resolve is being used for the side-effect of publishing images,
|
||||
# so the resulting yaml produced is ignored.
|
||||
# shellcheck disable=SC2086
|
||||
ko resolve --jobs=4 ${tag_option} -RBf "${image_dir}" > /dev/null
|
||||
)
|
||||
}
|
||||
|
||||
: "${KO_DOCKER_REPO:?"You must set 'KO_DOCKER_REPO', see DEVELOPMENT.md"}"
|
||||
|
||||
upload_test_images "$@"
|
@ -42,7 +42,13 @@ var (
|
||||
k8sConformanceTraefikVersion = flag.String("k8sConformanceTraefikVersion", "dev", "specify the Traefik version for the K8s Gateway API conformance report")
|
||||
)
|
||||
|
||||
const tailscaleSecretFilePath = "tailscale.secret"
|
||||
const (
|
||||
k3sImage = "docker.io/rancher/k3s:v1.32.9-k3s1"
|
||||
traefikImage = "traefik/traefik:latest"
|
||||
traefikDeployment = "deployments/traefik"
|
||||
traefikNamespace = "traefik"
|
||||
tailscaleSecretFilePath = "tailscale.secret"
|
||||
)
|
||||
|
||||
type composeConfig struct {
|
||||
Services map[string]composeService `yaml:"services"`
|
||||
|
@ -37,13 +37,6 @@ import (
|
||||
"sigs.k8s.io/yaml"
|
||||
)
|
||||
|
||||
const (
|
||||
k3sImage = "docker.io/rancher/k3s:v1.29.3-k3s1"
|
||||
traefikImage = "traefik/traefik:latest"
|
||||
traefikDeployment = "deployments/traefik"
|
||||
traefikNamespace = "traefik"
|
||||
)
|
||||
|
||||
// K8sConformanceSuite tests suite.
|
||||
type K8sConformanceSuite struct {
|
||||
BaseSuite
|
||||
|
178
integration/knative_conformance_test.go
Normal file
178
integration/knative_conformance_test.go
Normal file
@ -0,0 +1,178 @@
|
||||
// Use a build tag to include and run Knative conformance tests.
|
||||
// The Knative conformance toolkit redefines the skip-tests flag,
|
||||
// which conflicts with the testing library and causes a panic.
|
||||
//go:build knativeConformance
|
||||
|
||||
package integration
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"io"
|
||||
"os"
|
||||
"slices"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
"github.com/stretchr/testify/suite"
|
||||
"github.com/testcontainers/testcontainers-go"
|
||||
"github.com/testcontainers/testcontainers-go/modules/k3s"
|
||||
"github.com/testcontainers/testcontainers-go/network"
|
||||
"github.com/traefik/traefik/v3/integration/try"
|
||||
"knative.dev/networking/test/conformance/ingress"
|
||||
klog "sigs.k8s.io/controller-runtime/pkg/log"
|
||||
"sigs.k8s.io/controller-runtime/pkg/log/zap"
|
||||
)
|
||||
|
||||
const knativeNamespace = "knative-serving"
|
||||
|
||||
var imageNames = []string{
|
||||
traefikImage,
|
||||
"ko.local/grpc-ping:latest",
|
||||
"ko.local/httpproxy:latest",
|
||||
"ko.local/retry:latest",
|
||||
"ko.local/runtime:latest",
|
||||
"ko.local/wsserver:latest",
|
||||
"ko.local/timeout:latest",
|
||||
}
|
||||
|
||||
type KnativeConformanceSuite struct {
|
||||
BaseSuite
|
||||
|
||||
k3sContainer *k3s.K3sContainer
|
||||
}
|
||||
|
||||
func TestKnativeConformanceSuite(t *testing.T) {
|
||||
suite.Run(t, new(KnativeConformanceSuite))
|
||||
}
|
||||
|
||||
func (s *KnativeConformanceSuite) SetupSuite() {
|
||||
s.BaseSuite.SetupSuite()
|
||||
|
||||
// Avoid panic.
|
||||
klog.SetLogger(zap.New())
|
||||
|
||||
provider, err := testcontainers.ProviderDocker.GetProvider()
|
||||
if err != nil {
|
||||
s.T().Fatal(err)
|
||||
}
|
||||
|
||||
ctx := s.T().Context()
|
||||
|
||||
// Ensure image is available locally.
|
||||
images, err := provider.ListImages(ctx)
|
||||
if err != nil {
|
||||
s.T().Fatal(err)
|
||||
}
|
||||
|
||||
if !slices.ContainsFunc(images, func(img testcontainers.ImageInfo) bool {
|
||||
return img.Name == traefikImage
|
||||
}) {
|
||||
s.T().Fatal("Traefik image is not present")
|
||||
}
|
||||
|
||||
s.k3sContainer, err = k3s.Run(ctx,
|
||||
k3sImage,
|
||||
k3s.WithManifest("./fixtures/knative/00-knative-crd-v1.19.0.yml"),
|
||||
k3s.WithManifest("./fixtures/knative/01-rbac.yml"),
|
||||
k3s.WithManifest("./fixtures/knative/02-traefik.yml"),
|
||||
k3s.WithManifest("./fixtures/knative/03-knative-serving-v1.19.0.yaml"),
|
||||
k3s.WithManifest("./fixtures/knative/04-serving-tests-namespace.yaml"),
|
||||
network.WithNetwork(nil, s.network),
|
||||
)
|
||||
if err != nil {
|
||||
s.T().Fatal(err)
|
||||
}
|
||||
|
||||
for _, imageName := range imageNames {
|
||||
if err = s.k3sContainer.LoadImages(ctx, imageName); err != nil {
|
||||
s.T().Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
exitCode, _, err := s.k3sContainer.Exec(ctx, []string{"kubectl", "wait", "-n", traefikNamespace, traefikDeployment, "--for=condition=Available", "--timeout=10s"})
|
||||
if err != nil || exitCode > 0 {
|
||||
s.T().Fatalf("Traefik pod is not ready: %v", err)
|
||||
}
|
||||
|
||||
exitCode, _, err = s.k3sContainer.Exec(ctx, []string{"kubectl", "wait", "-n", knativeNamespace, "deployment/activator", "--for=condition=Available", "--timeout=10s"})
|
||||
if err != nil || exitCode > 0 {
|
||||
s.T().Fatalf("Activator pod is not ready: %v", err)
|
||||
}
|
||||
|
||||
exitCode, _, err = s.k3sContainer.Exec(ctx, []string{"kubectl", "wait", "-n", knativeNamespace, "deployment/controller", "--for=condition=Available", "--timeout=10s"})
|
||||
if err != nil || exitCode > 0 {
|
||||
s.T().Fatalf("Controller pod is not ready: %v", err)
|
||||
}
|
||||
|
||||
exitCode, _, err = s.k3sContainer.Exec(ctx, []string{"kubectl", "wait", "-n", knativeNamespace, "deployment/autoscaler", "--for=condition=Available", "--timeout=10s"})
|
||||
if err != nil || exitCode > 0 {
|
||||
s.T().Fatalf("Autoscaler pod is not ready: %v", err)
|
||||
}
|
||||
|
||||
exitCode, _, err = s.k3sContainer.Exec(ctx, []string{"kubectl", "wait", "-n", knativeNamespace, "deployment/webhook", "--for=condition=Available", "--timeout=10s"})
|
||||
if err != nil || exitCode > 0 {
|
||||
s.T().Fatalf("Webhook pod is not ready: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *KnativeConformanceSuite) TearDownSuite() {
|
||||
ctx := s.T().Context()
|
||||
|
||||
if s.T().Failed() || *showLog {
|
||||
k3sLogs, err := s.k3sContainer.Logs(ctx)
|
||||
if err == nil {
|
||||
if res, err := io.ReadAll(k3sLogs); err == nil {
|
||||
s.T().Log(string(res))
|
||||
}
|
||||
}
|
||||
|
||||
exitCode, result, err := s.k3sContainer.Exec(ctx, []string{"kubectl", "logs", "-n", traefikNamespace, traefikDeployment})
|
||||
if err == nil || exitCode == 0 {
|
||||
if res, err := io.ReadAll(result); err == nil {
|
||||
s.T().Log(string(res))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err := s.k3sContainer.Terminate(ctx); err != nil {
|
||||
s.T().Fatal(err)
|
||||
}
|
||||
|
||||
s.BaseSuite.TearDownSuite()
|
||||
}
|
||||
|
||||
func (s *KnativeConformanceSuite) TestKnativeConformance() {
|
||||
// Wait for traefik to start
|
||||
k3sContainerIP, err := s.k3sContainer.ContainerIP(s.T().Context())
|
||||
require.NoError(s.T(), err)
|
||||
|
||||
err = try.GetRequest("http://"+k3sContainerIP+":9000/api/entrypoints", 10*time.Second, try.BodyContains(`"name":"pweb"`))
|
||||
require.NoError(s.T(), err)
|
||||
|
||||
kubeconfig, err := s.k3sContainer.GetKubeConfig(s.T().Context())
|
||||
if err != nil {
|
||||
s.T().Fatal(err)
|
||||
}
|
||||
|
||||
// Write the kubeconfig.yaml in a temp file.
|
||||
kubeconfigFile := s.T().TempDir() + "/kubeconfig.yaml"
|
||||
|
||||
if err = os.WriteFile(kubeconfigFile, kubeconfig, 0o644); err != nil {
|
||||
s.T().Fatal(err)
|
||||
}
|
||||
|
||||
if err = flag.CommandLine.Set("kubeconfig", kubeconfigFile); err != nil {
|
||||
s.T().Fatal(err)
|
||||
}
|
||||
|
||||
if err = flag.CommandLine.Set("ingressClass", "traefik.ingress.networking.knative.dev"); err != nil {
|
||||
s.T().Fatal(err)
|
||||
}
|
||||
|
||||
if err = flag.CommandLine.Set("skip-tests", "headers/probe"); err != nil {
|
||||
s.T().Fatal(err)
|
||||
}
|
||||
|
||||
ingress.RunConformance(s.T())
|
||||
}
|
@ -177,6 +177,10 @@ type WRRService struct {
|
||||
Name string `json:"name,omitempty" toml:"name,omitempty" yaml:"name,omitempty" export:"true"`
|
||||
Weight *int `json:"weight,omitempty" toml:"weight,omitempty" yaml:"weight,omitempty" export:"true"`
|
||||
|
||||
// Headers defines the HTTP headers that should be added to the request when calling the service.
|
||||
// This is required by the Knative implementation which expects specific headers to be sent.
|
||||
Headers map[string]string `json:"-" toml:"-" yaml:"-" label:"-" file:"-"`
|
||||
|
||||
// Status defines an HTTP status code that should be returned when calling the service.
|
||||
// This is required by the Gateway API implementation which expects specific HTTP status to be returned.
|
||||
Status *int `json:"-" toml:"-" yaml:"-" label:"-" file:"-"`
|
||||
|
@ -2491,6 +2491,13 @@ func (in *WRRService) DeepCopyInto(out *WRRService) {
|
||||
*out = new(int)
|
||||
**out = **in
|
||||
}
|
||||
if in.Headers != nil {
|
||||
in, out := &in.Headers, &out.Headers
|
||||
*out = make(map[string]string, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
if in.Status != nil {
|
||||
in, out := &in.Status, &out.Status
|
||||
*out = new(int)
|
||||
|
@ -9,6 +9,7 @@ type Experimental struct {
|
||||
AbortOnPluginFailure bool `description:"Defines whether all plugins must be loaded successfully for Traefik to start." json:"abortOnPluginFailure,omitempty" toml:"abortOnPluginFailure,omitempty" yaml:"abortOnPluginFailure,omitempty" export:"true"`
|
||||
FastProxy *FastProxyConfig `description:"Enables the FastProxy implementation." json:"fastProxy,omitempty" toml:"fastProxy,omitempty" yaml:"fastProxy,omitempty" label:"allowEmpty" file:"allowEmpty" export:"true"`
|
||||
OTLPLogs bool `description:"Enables the OpenTelemetry logs integration." json:"otlplogs,omitempty" toml:"otlplogs,omitempty" yaml:"otlplogs,omitempty" export:"true"`
|
||||
Knative bool `description:"Allow the Knative provider usage." json:"knative,omitempty" toml:"knative,omitempty" yaml:"knative,omitempty" export:"true"`
|
||||
KubernetesIngressNGINX bool `description:"Allow the Kubernetes Ingress NGINX provider usage." json:"kubernetesIngressNGINX,omitempty" toml:"kubernetesIngressNGINX,omitempty" yaml:"kubernetesIngressNGINX,omitempty" export:"true"`
|
||||
|
||||
// Deprecated: KubernetesGateway provider is not an experimental feature starting with v3.1. Please remove its usage from the static configuration.
|
||||
|
@ -24,6 +24,7 @@ import (
|
||||
"github.com/traefik/traefik/v3/pkg/provider/kubernetes/gateway"
|
||||
"github.com/traefik/traefik/v3/pkg/provider/kubernetes/ingress"
|
||||
ingressnginx "github.com/traefik/traefik/v3/pkg/provider/kubernetes/ingress-nginx"
|
||||
"github.com/traefik/traefik/v3/pkg/provider/kubernetes/knative"
|
||||
"github.com/traefik/traefik/v3/pkg/provider/kv/consul"
|
||||
"github.com/traefik/traefik/v3/pkg/provider/kv/etcd"
|
||||
"github.com/traefik/traefik/v3/pkg/provider/kv/redis"
|
||||
@ -239,6 +240,7 @@ type Providers struct {
|
||||
KubernetesIngressNGINX *ingressnginx.Provider `description:"Enables Kubernetes Ingress NGINX provider." json:"kubernetesIngressNGINX,omitempty" toml:"kubernetesIngressNGINX,omitempty" yaml:"kubernetesIngressNGINX,omitempty" label:"allowEmpty" file:"allowEmpty" export:"true"`
|
||||
KubernetesCRD *crd.Provider `description:"Enables Kubernetes CRD provider." json:"kubernetesCRD,omitempty" toml:"kubernetesCRD,omitempty" yaml:"kubernetesCRD,omitempty" label:"allowEmpty" file:"allowEmpty" export:"true"`
|
||||
KubernetesGateway *gateway.Provider `description:"Enables Kubernetes Gateway API provider." json:"kubernetesGateway,omitempty" toml:"kubernetesGateway,omitempty" yaml:"kubernetesGateway,omitempty" label:"allowEmpty" file:"allowEmpty" export:"true"`
|
||||
Knative *knative.Provider `description:"Enables Knative provider." json:"knative,omitempty" toml:"knative,omitempty" yaml:"knative,omitempty" label:"allowEmpty" file:"allowEmpty" export:"true"`
|
||||
Rest *rest.Provider `description:"Enables Rest provider." json:"rest,omitempty" toml:"rest,omitempty" yaml:"rest,omitempty" label:"allowEmpty" file:"allowEmpty" export:"true"`
|
||||
ConsulCatalog *consulcatalog.ProviderBuilder `description:"Enables Consul Catalog provider." json:"consulCatalog,omitempty" toml:"consulCatalog,omitempty" yaml:"consulCatalog,omitempty" label:"allowEmpty" file:"allowEmpty" export:"true"`
|
||||
Nomad *nomad.ProviderBuilder `description:"Enables Nomad provider." json:"nomad,omitempty" toml:"nomad,omitempty" yaml:"nomad,omitempty" label:"allowEmpty" file:"allowEmpty" export:"true"`
|
||||
@ -431,6 +433,12 @@ func (c *Configuration) ValidateConfiguration() error {
|
||||
}
|
||||
}
|
||||
|
||||
if c.Providers != nil && c.Providers.Knative != nil {
|
||||
if c.Experimental == nil || !c.Experimental.Knative {
|
||||
return errors.New("the experimental Knative feature must be enabled to use the Knative provider")
|
||||
}
|
||||
}
|
||||
|
||||
if c.AccessLog != nil && c.AccessLog.OTLP != nil {
|
||||
if c.Experimental == nil || !c.Experimental.OTLPLogs {
|
||||
return errors.New("the experimental OTLPLogs feature must be enabled to use OTLP access logging")
|
||||
|
@ -101,6 +101,10 @@ func NewProviderAggregator(conf static.Providers) *ProviderAggregator {
|
||||
p.quietAddProvider(conf.KubernetesCRD)
|
||||
}
|
||||
|
||||
if conf.Knative != nil {
|
||||
p.quietAddProvider(conf.Knative)
|
||||
}
|
||||
|
||||
if conf.KubernetesGateway != nil {
|
||||
p.quietAddProvider(conf.KubernetesGateway)
|
||||
}
|
||||
|
232
pkg/provider/kubernetes/knative/client.go
Normal file
232
pkg/provider/kubernetes/knative/client.go
Normal file
@ -0,0 +1,232 @@
|
||||
package knative
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/traefik/traefik/v3/pkg/provider/kubernetes/k8s"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
kinformers "k8s.io/client-go/informers"
|
||||
kclientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
knativenetworkingv1alpha1 "knative.dev/networking/pkg/apis/networking/v1alpha1"
|
||||
knativenetworkingclientset "knative.dev/networking/pkg/client/clientset/versioned"
|
||||
knativenetworkinginformers "knative.dev/networking/pkg/client/informers/externalversions"
|
||||
)
|
||||
|
||||
const resyncPeriod = 10 * time.Minute
|
||||
|
||||
type clientWrapper struct {
|
||||
csKnativeNetworking knativenetworkingclientset.Interface
|
||||
csKube kclientset.Interface
|
||||
|
||||
factoriesKnativeNetworking map[string]knativenetworkinginformers.SharedInformerFactory
|
||||
factoriesKube map[string]kinformers.SharedInformerFactory
|
||||
|
||||
labelSelector string
|
||||
|
||||
isNamespaceAll bool
|
||||
watchedNamespaces []string
|
||||
}
|
||||
|
||||
func createClientFromConfig(c *rest.Config) (*clientWrapper, error) {
|
||||
csKnativeNetworking, err := knativenetworkingclientset.NewForConfig(c)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
csKube, err := kclientset.NewForConfig(c)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return newClientImpl(csKnativeNetworking, csKube), nil
|
||||
}
|
||||
|
||||
func newClientImpl(csKnativeNetworking knativenetworkingclientset.Interface, csKube kclientset.Interface) *clientWrapper {
|
||||
return &clientWrapper{
|
||||
csKnativeNetworking: csKnativeNetworking,
|
||||
csKube: csKube,
|
||||
factoriesKnativeNetworking: make(map[string]knativenetworkinginformers.SharedInformerFactory),
|
||||
factoriesKube: make(map[string]kinformers.SharedInformerFactory),
|
||||
}
|
||||
}
|
||||
|
||||
// newInClusterClient returns a new Provider client that is expected to run
|
||||
// inside the cluster.
|
||||
func newInClusterClient(endpoint string) (*clientWrapper, error) {
|
||||
config, err := rest.InClusterConfig()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("creating in-cluster configuration: %w", err)
|
||||
}
|
||||
|
||||
if endpoint != "" {
|
||||
config.Host = endpoint
|
||||
}
|
||||
|
||||
return createClientFromConfig(config)
|
||||
}
|
||||
|
||||
func newExternalClusterClientFromFile(file string) (*clientWrapper, error) {
|
||||
configFromFlags, err := clientcmd.BuildConfigFromFlags("", file)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return createClientFromConfig(configFromFlags)
|
||||
}
|
||||
|
||||
// newExternalClusterClient returns a new Provider client that may run outside
|
||||
// of the cluster.
|
||||
// The endpoint parameter must not be empty.
|
||||
func newExternalClusterClient(endpoint, token, caFilePath string) (*clientWrapper, error) {
|
||||
if endpoint == "" {
|
||||
return nil, errors.New("endpoint missing for external cluster client")
|
||||
}
|
||||
|
||||
config := &rest.Config{
|
||||
Host: endpoint,
|
||||
BearerToken: token,
|
||||
}
|
||||
|
||||
if caFilePath != "" {
|
||||
caData, err := os.ReadFile(caFilePath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("reading CA file %s: %w", caFilePath, err)
|
||||
}
|
||||
|
||||
config.TLSClientConfig = rest.TLSClientConfig{CAData: caData}
|
||||
}
|
||||
|
||||
return createClientFromConfig(config)
|
||||
}
|
||||
|
||||
// WatchAll starts namespace-specific controllers for all relevant kinds.
|
||||
func (c *clientWrapper) WatchAll(namespaces []string, stopCh <-chan struct{}) (<-chan interface{}, error) {
|
||||
eventCh := make(chan interface{}, 1)
|
||||
eventHandler := &k8s.ResourceEventHandler{Ev: eventCh}
|
||||
|
||||
if len(namespaces) == 0 {
|
||||
namespaces = []string{metav1.NamespaceAll}
|
||||
c.isNamespaceAll = true
|
||||
}
|
||||
c.watchedNamespaces = namespaces
|
||||
|
||||
for _, ns := range namespaces {
|
||||
factory := knativenetworkinginformers.NewSharedInformerFactoryWithOptions(c.csKnativeNetworking, resyncPeriod, knativenetworkinginformers.WithNamespace(ns), knativenetworkinginformers.WithTweakListOptions(func(opts *metav1.ListOptions) {
|
||||
opts.LabelSelector = c.labelSelector
|
||||
}))
|
||||
_, err := factory.Networking().V1alpha1().Ingresses().Informer().AddEventHandler(eventHandler)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
factoryKube := kinformers.NewSharedInformerFactoryWithOptions(c.csKube, resyncPeriod, kinformers.WithNamespace(ns))
|
||||
_, err = factoryKube.Core().V1().Services().Informer().AddEventHandler(eventHandler)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
_, err = factoryKube.Core().V1().Secrets().Informer().AddEventHandler(eventHandler)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
c.factoriesKube[ns] = factoryKube
|
||||
c.factoriesKnativeNetworking[ns] = factory
|
||||
}
|
||||
|
||||
for _, ns := range namespaces {
|
||||
c.factoriesKnativeNetworking[ns].Start(stopCh)
|
||||
c.factoriesKube[ns].Start(stopCh)
|
||||
}
|
||||
|
||||
for _, ns := range namespaces {
|
||||
for t, ok := range c.factoriesKnativeNetworking[ns].WaitForCacheSync(stopCh) {
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("timed out waiting for controller caches to sync %s in namespace %q", t.String(), ns)
|
||||
}
|
||||
}
|
||||
for t, ok := range c.factoriesKube[ns].WaitForCacheSync(stopCh) {
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("timed out waiting for controller caches to sync %s in namespace %q", t.String(), ns)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return eventCh, nil
|
||||
}
|
||||
|
||||
func (c *clientWrapper) ListIngresses() []*knativenetworkingv1alpha1.Ingress {
|
||||
var result []*knativenetworkingv1alpha1.Ingress
|
||||
|
||||
for ns, factory := range c.factoriesKnativeNetworking {
|
||||
ings, err := factory.Networking().V1alpha1().Ingresses().Lister().List(labels.Everything()) // todo: label selector
|
||||
if err != nil {
|
||||
log.Error().Msgf("Failed to list ingresses in namespace %s: %s", ns, err)
|
||||
}
|
||||
result = append(result, ings...)
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
func (c *clientWrapper) UpdateIngressStatus(ingress *knativenetworkingv1alpha1.Ingress) error {
|
||||
_, err := c.csKnativeNetworking.NetworkingV1alpha1().Ingresses(ingress.Namespace).UpdateStatus(context.TODO(), ingress, metav1.UpdateOptions{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("updating knative ingress status %s/%s: %w", ingress.Namespace, ingress.Name, err)
|
||||
}
|
||||
|
||||
log.Info().Msgf("Updated status on knative ingress %s/%s", ingress.Namespace, ingress.Name)
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetService returns the named service from the given namespace.
|
||||
func (c *clientWrapper) GetService(namespace, name string) (*corev1.Service, error) {
|
||||
if !c.isWatchedNamespace(namespace) {
|
||||
return nil, fmt.Errorf("getting service %s/%s: namespace is not within watched namespaces", namespace, name)
|
||||
}
|
||||
|
||||
return c.factoriesKube[c.lookupNamespace(namespace)].Core().V1().Services().Lister().Services(namespace).Get(name)
|
||||
}
|
||||
|
||||
// GetSecret returns the named secret from the given namespace.
|
||||
func (c *clientWrapper) GetSecret(namespace, name string) (*corev1.Secret, error) {
|
||||
if !c.isWatchedNamespace(namespace) {
|
||||
return nil, fmt.Errorf("getting secret %s/%s: namespace is not within watched namespaces", namespace, name)
|
||||
}
|
||||
|
||||
return c.factoriesKube[c.lookupNamespace(namespace)].Core().V1().Secrets().Lister().Secrets(namespace).Get(name)
|
||||
}
|
||||
|
||||
// isWatchedNamespace checks to ensure that the namespace is being watched before we request
|
||||
// it to ensure we don't panic by requesting an out-of-watch object.
|
||||
func (c *clientWrapper) isWatchedNamespace(ns string) bool {
|
||||
if c.isNamespaceAll {
|
||||
return true
|
||||
}
|
||||
for _, watchedNamespace := range c.watchedNamespaces {
|
||||
if watchedNamespace == ns {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// lookupNamespace returns the lookup namespace key for the given namespace.
|
||||
// When listening on all namespaces, it returns the client-go identifier ("")
|
||||
// for all-namespaces. Otherwise, it returns the given namespace.
|
||||
// The distinction is necessary because we index all informers on the special
|
||||
// identifier iff all-namespaces are requested but receive specific namespace
|
||||
// identifiers from the Kubernetes API, so we have to bridge this gap.
|
||||
func (c *clientWrapper) lookupNamespace(ns string) string {
|
||||
if c.isNamespaceAll {
|
||||
return metav1.NamespaceAll
|
||||
}
|
||||
return ns
|
||||
}
|
33
pkg/provider/kubernetes/knative/fixtures/cluster_local.yaml
Normal file
33
pkg/provider/kubernetes/knative/fixtures/cluster_local.yaml
Normal file
@ -0,0 +1,33 @@
|
||||
---
|
||||
apiVersion: networking.internal.knative.dev/v1alpha1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
annotations:
|
||||
networking.knative.dev/ingress.class: traefik.ingress.networking.knative.dev
|
||||
name: helloworld-go
|
||||
namespace: default
|
||||
spec:
|
||||
httpOption: Enabled
|
||||
rules:
|
||||
- hosts:
|
||||
- helloworld-go.default
|
||||
- helloworld-go.default.svc
|
||||
- helloworld-go.default.svc.cluster.local
|
||||
http:
|
||||
paths:
|
||||
- splits:
|
||||
- appendHeaders:
|
||||
Knative-Serving-Namespace: default
|
||||
Knative-Serving-Revision: helloworld-go-00001
|
||||
percent: 50
|
||||
serviceName: helloworld-go-00001
|
||||
serviceNamespace: default
|
||||
servicePort: 80
|
||||
- appendHeaders:
|
||||
Knative-Serving-Namespace: default
|
||||
Knative-Serving-Revision: helloworld-go-00002
|
||||
percent: 50
|
||||
serviceName: helloworld-go-00002
|
||||
serviceNamespace: default
|
||||
servicePort: 80
|
||||
visibility: ClusterLocal
|
33
pkg/provider/kubernetes/knative/fixtures/external_ip.yaml
Normal file
33
pkg/provider/kubernetes/knative/fixtures/external_ip.yaml
Normal file
@ -0,0 +1,33 @@
|
||||
---
|
||||
apiVersion: networking.internal.knative.dev/v1alpha1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
annotations:
|
||||
networking.knative.dev/ingress.class: traefik.ingress.networking.knative.dev
|
||||
name: helloworld-go
|
||||
namespace: default
|
||||
spec:
|
||||
httpOption: Enabled
|
||||
rules:
|
||||
- hosts:
|
||||
- helloworld-go.default
|
||||
- helloworld-go.default.svc
|
||||
- helloworld-go.default.svc.cluster.local
|
||||
http:
|
||||
paths:
|
||||
- splits:
|
||||
- appendHeaders:
|
||||
Knative-Serving-Namespace: default
|
||||
Knative-Serving-Revision: helloworld-go-00001
|
||||
percent: 50
|
||||
serviceName: helloworld-go-00001
|
||||
serviceNamespace: default
|
||||
servicePort: 80
|
||||
- appendHeaders:
|
||||
Knative-Serving-Namespace: default
|
||||
Knative-Serving-Revision: helloworld-go-00002
|
||||
percent: 50
|
||||
serviceName: helloworld-go-00002
|
||||
serviceNamespace: default
|
||||
servicePort: 80
|
||||
visibility: ExternalIP
|
39
pkg/provider/kubernetes/knative/fixtures/services.yaml
Normal file
39
pkg/provider/kubernetes/knative/fixtures/services.yaml
Normal file
@ -0,0 +1,39 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: helloworld-go-00001
|
||||
namespace: default
|
||||
spec:
|
||||
clusterIP: 10.43.38.208
|
||||
clusterIPs:
|
||||
- 10.43.38.208
|
||||
ports:
|
||||
- name: http
|
||||
port: 80
|
||||
protocol: TCP
|
||||
targetPort: 8012
|
||||
- name: https
|
||||
port: 443
|
||||
protocol: TCP
|
||||
targetPort: 8112
|
||||
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: helloworld-go-00002
|
||||
namespace: default
|
||||
spec:
|
||||
clusterIP: 10.43.44.18
|
||||
clusterIPs:
|
||||
- 10.43.44.18
|
||||
ports:
|
||||
- name: http
|
||||
port: 80
|
||||
protocol: TCP
|
||||
targetPort: 8012
|
||||
- name: https
|
||||
port: 443
|
||||
protocol: TCP
|
||||
targetPort: 8112
|
38
pkg/provider/kubernetes/knative/fixtures/tls.yaml
Normal file
38
pkg/provider/kubernetes/knative/fixtures/tls.yaml
Normal file
@ -0,0 +1,38 @@
|
||||
---
|
||||
apiVersion: networking.internal.knative.dev/v1alpha1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
annotations:
|
||||
networking.knative.dev/ingress.class: traefik.ingress.networking.knative.dev
|
||||
name: helloworld-go
|
||||
namespace: default
|
||||
spec:
|
||||
httpOption: Enabled
|
||||
tls:
|
||||
- hosts:
|
||||
- helloworld-go.default.svc.cluster.local
|
||||
secretName: secretName
|
||||
secretNamespace: secretNamespace
|
||||
rules:
|
||||
- hosts:
|
||||
- helloworld-go.default
|
||||
- helloworld-go.default.svc
|
||||
- helloworld-go.default.svc.cluster.local
|
||||
http:
|
||||
paths:
|
||||
- splits:
|
||||
- appendHeaders:
|
||||
Knative-Serving-Namespace: default
|
||||
Knative-Serving-Revision: helloworld-go-00001
|
||||
percent: 50
|
||||
serviceName: helloworld-go-00001
|
||||
serviceNamespace: default
|
||||
servicePort: 80
|
||||
- appendHeaders:
|
||||
Knative-Serving-Namespace: default
|
||||
Knative-Serving-Revision: helloworld-go-00002
|
||||
percent: 50
|
||||
serviceName: helloworld-go-00002
|
||||
serviceNamespace: default
|
||||
servicePort: 80
|
||||
visibility: ExternalIP
|
@ -0,0 +1,8 @@
|
||||
---
|
||||
apiVersion: networking.internal.knative.dev/v1alpha1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
annotations:
|
||||
networking.knative.dev/ingress.class: foo.ingress.networking.knative.dev
|
||||
name: helloworld-go
|
||||
namespace: default
|
531
pkg/provider/kubernetes/knative/kubernetes.go
Normal file
531
pkg/provider/kubernetes/knative/kubernetes.go
Normal file
@ -0,0 +1,531 @@
|
||||
package knative
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"maps"
|
||||
"net"
|
||||
"os"
|
||||
"slices"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/cenkalti/backoff/v4"
|
||||
"github.com/mitchellh/hashstructure"
|
||||
"github.com/rs/zerolog/log"
|
||||
ptypes "github.com/traefik/paerser/types"
|
||||
"github.com/traefik/traefik/v3/pkg/config/dynamic"
|
||||
"github.com/traefik/traefik/v3/pkg/job"
|
||||
"github.com/traefik/traefik/v3/pkg/observability/logs"
|
||||
"github.com/traefik/traefik/v3/pkg/safe"
|
||||
"github.com/traefik/traefik/v3/pkg/tls"
|
||||
"github.com/traefik/traefik/v3/pkg/types"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
"k8s.io/utils/ptr"
|
||||
knativenetworking "knative.dev/networking/pkg/apis/networking"
|
||||
knativenetworkingv1alpha1 "knative.dev/networking/pkg/apis/networking/v1alpha1"
|
||||
"knative.dev/pkg/network"
|
||||
)
|
||||
|
||||
const (
|
||||
providerName = "knative"
|
||||
traefikIngressClassName = "traefik.ingress.networking.knative.dev"
|
||||
)
|
||||
|
||||
// ServiceRef holds a Kubernetes service reference.
|
||||
type ServiceRef struct {
|
||||
Name string `description:"Name of the Kubernetes service." json:"desc,omitempty" toml:"desc,omitempty" yaml:"desc,omitempty"`
|
||||
Namespace string `description:"Namespace of the Kubernetes service." json:"namespace,omitempty" toml:"namespace,omitempty" yaml:"namespace,omitempty"`
|
||||
}
|
||||
|
||||
// Provider holds configurations of the provider.
|
||||
type Provider struct {
|
||||
Endpoint string `description:"Kubernetes server endpoint (required for external cluster client)." json:"endpoint,omitempty" toml:"endpoint,omitempty" yaml:"endpoint,omitempty"`
|
||||
Token string `description:"Kubernetes bearer token (not needed for in-cluster client)." json:"token,omitempty" toml:"token,omitempty" yaml:"token,omitempty"`
|
||||
CertAuthFilePath string `description:"Kubernetes certificate authority file path (not needed for in-cluster client)." json:"certAuthFilePath,omitempty" toml:"certAuthFilePath,omitempty" yaml:"certAuthFilePath,omitempty"`
|
||||
Namespaces []string `description:"Kubernetes namespaces." json:"namespaces,omitempty" toml:"namespaces,omitempty" yaml:"namespaces,omitempty" export:"true"`
|
||||
LabelSelector string `description:"Kubernetes label selector to use." json:"labelSelector,omitempty" toml:"labelSelector,omitempty" yaml:"labelSelector,omitempty" export:"true"`
|
||||
PublicEntrypoints []string `description:"Entrypoint names used to expose the Ingress publicly. If empty an Ingress is exposed on all entrypoints." json:"publicEntrypoints,omitempty" toml:"publicEntrypoints,omitempty" yaml:"publicEntrypoints,omitempty" export:"true"`
|
||||
PublicService ServiceRef `description:"Kubernetes service used to expose the networking controller publicly." json:"publicService,omitempty" toml:"publicService,omitempty" yaml:"publicService,omitempty" export:"true"`
|
||||
PrivateEntrypoints []string `description:"Entrypoint names used to expose the Ingress privately. If empty local Ingresses are skipped." json:"privateEntrypoints,omitempty" toml:"privateEntrypoints,omitempty" yaml:"privateEntrypoints,omitempty" export:"true"`
|
||||
PrivateService ServiceRef `description:"Kubernetes service used to expose the networking controller privately." json:"privateService,omitempty" toml:"privateService,omitempty" yaml:"privateService,omitempty" export:"true"`
|
||||
ThrottleDuration ptypes.Duration `description:"Ingress refresh throttle duration" json:"throttleDuration,omitempty" toml:"throttleDuration,omitempty" yaml:"throttleDuration,omitempty"`
|
||||
|
||||
client *clientWrapper
|
||||
lastConfiguration safe.Safe
|
||||
}
|
||||
|
||||
// Init the provider.
|
||||
func (p *Provider) Init() error {
|
||||
logger := log.With().Str(logs.ProviderName, providerName).Logger()
|
||||
|
||||
// Initializes Kubernetes client.
|
||||
var err error
|
||||
p.client, err = p.newK8sClient(logger.WithContext(context.Background()))
|
||||
if err != nil {
|
||||
return fmt.Errorf("creating kubernetes client: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Provide allows the knative provider to provide configurations to traefik using the given configuration channel.
|
||||
func (p *Provider) Provide(configurationChan chan<- dynamic.Message, pool *safe.Pool) error {
|
||||
logger := log.With().Str(logs.ProviderName, providerName).Logger()
|
||||
ctxLog := logger.WithContext(context.Background())
|
||||
|
||||
pool.GoCtx(func(ctxPool context.Context) {
|
||||
operation := func() error {
|
||||
eventsChan, err := p.client.WatchAll(p.Namespaces, ctxPool.Done())
|
||||
if err != nil {
|
||||
logger.Error().Msgf("Error watching kubernetes events: %v", err)
|
||||
timer := time.NewTimer(1 * time.Second)
|
||||
select {
|
||||
case <-timer.C:
|
||||
return err
|
||||
case <-ctxPool.Done():
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
throttleDuration := time.Duration(p.ThrottleDuration)
|
||||
throttledChan := throttleEvents(ctxLog, throttleDuration, pool, eventsChan)
|
||||
if throttledChan != nil {
|
||||
eventsChan = throttledChan
|
||||
}
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctxPool.Done():
|
||||
return nil
|
||||
case event := <-eventsChan:
|
||||
// Note that event is the *first* event that came in during this throttling interval -- if we're hitting our throttle, we may have dropped events.
|
||||
// This is fine, because we don't treat different event types differently.
|
||||
// But if we do in the future, we'll need to track more information about the dropped events.
|
||||
conf, ingressStatuses := p.loadConfiguration(ctxLog)
|
||||
|
||||
confHash, err := hashstructure.Hash(conf, nil)
|
||||
switch {
|
||||
case err != nil:
|
||||
logger.Error().Msg("Unable to hash the configuration")
|
||||
case p.lastConfiguration.Get() == confHash:
|
||||
logger.Debug().Msgf("Skipping Kubernetes event kind %T", event)
|
||||
default:
|
||||
p.lastConfiguration.Set(confHash)
|
||||
configurationChan <- dynamic.Message{
|
||||
ProviderName: providerName,
|
||||
Configuration: conf,
|
||||
}
|
||||
}
|
||||
|
||||
// If we're throttling,
|
||||
// we sleep here for the throttle duration to enforce that we don't refresh faster than our throttle.
|
||||
// time.Sleep returns immediately if p.ThrottleDuration is 0 (no throttle).
|
||||
time.Sleep(throttleDuration)
|
||||
|
||||
// Updating the ingress status after the throttleDuration allows to wait to make sure that the dynamic conf is updated before updating the status.
|
||||
// This is needed for the conformance tests to pass, for example.
|
||||
for _, ingress := range ingressStatuses {
|
||||
if err := p.updateKnativeIngressStatus(ctxLog, ingress); err != nil {
|
||||
logger.Error().Err(err).Msgf("Error updating status for Ingress %s/%s", ingress.Namespace, ingress.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
notify := func(err error, time time.Duration) {
|
||||
logger.Error().Msgf("Provider connection error: %v; retrying in %s", err, time)
|
||||
}
|
||||
err := backoff.RetryNotify(safe.OperationWithRecover(operation), backoff.WithContext(job.NewBackOff(backoff.NewExponentialBackOff()), ctxPool), notify)
|
||||
if err != nil {
|
||||
logger.Error().Msgf("Cannot connect to Provider: %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *Provider) newK8sClient(ctx context.Context) (*clientWrapper, error) {
|
||||
logger := log.Ctx(ctx).With().Logger()
|
||||
|
||||
_, err := labels.Parse(p.LabelSelector)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("parsing label selector: %q", p.LabelSelector)
|
||||
}
|
||||
logger.Info().Msgf("Label selector is: %q", p.LabelSelector)
|
||||
|
||||
withEndpoint := ""
|
||||
if p.Endpoint != "" {
|
||||
withEndpoint = fmt.Sprintf(" with endpoint %s", p.Endpoint)
|
||||
}
|
||||
|
||||
var client *clientWrapper
|
||||
switch {
|
||||
case os.Getenv("KUBERNETES_SERVICE_HOST") != "" && os.Getenv("KUBERNETES_SERVICE_PORT") != "":
|
||||
logger.Info().Msgf("Creating in-cluster Provider client%s", withEndpoint)
|
||||
client, err = newInClusterClient(p.Endpoint)
|
||||
case os.Getenv("KUBECONFIG") != "":
|
||||
logger.Info().Msgf("Creating cluster-external Provider client from KUBECONFIG %s", os.Getenv("KUBECONFIG"))
|
||||
client, err = newExternalClusterClientFromFile(os.Getenv("KUBECONFIG"))
|
||||
default:
|
||||
logger.Info().Msgf("Creating cluster-external Provider client%s", withEndpoint)
|
||||
client, err = newExternalClusterClient(p.Endpoint, p.Token, p.CertAuthFilePath)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
client.labelSelector = p.LabelSelector
|
||||
return client, nil
|
||||
}
|
||||
|
||||
func (p *Provider) loadConfiguration(ctx context.Context) (*dynamic.Configuration, []*knativenetworkingv1alpha1.Ingress) {
|
||||
conf := &dynamic.Configuration{
|
||||
HTTP: &dynamic.HTTPConfiguration{
|
||||
Routers: make(map[string]*dynamic.Router),
|
||||
Middlewares: make(map[string]*dynamic.Middleware),
|
||||
Services: make(map[string]*dynamic.Service),
|
||||
},
|
||||
}
|
||||
|
||||
var ingressStatuses []*knativenetworkingv1alpha1.Ingress
|
||||
|
||||
uniqCerts := make(map[string]*tls.CertAndStores)
|
||||
for _, ingress := range p.client.ListIngresses() {
|
||||
logger := log.Ctx(ctx).With().
|
||||
Str("ingress", ingress.Name).
|
||||
Str("namespace", ingress.Namespace).
|
||||
Logger()
|
||||
|
||||
if ingress.Annotations[knativenetworking.IngressClassAnnotationKey] != traefikIngressClassName {
|
||||
logger.Debug().Msgf("Skipping Ingress %s/%s", ingress.Namespace, ingress.Name)
|
||||
continue
|
||||
}
|
||||
|
||||
if err := p.loadCertificates(ctx, ingress, uniqCerts); err != nil {
|
||||
logger.Error().Err(err).Msg("Error loading TLS certificates")
|
||||
continue
|
||||
}
|
||||
|
||||
conf.HTTP = mergeHTTPConfigs(conf.HTTP, p.buildRouters(ctx, ingress))
|
||||
|
||||
// TODO: should we handle configuration errors?
|
||||
ingressStatuses = append(ingressStatuses, ingress)
|
||||
}
|
||||
|
||||
if len(uniqCerts) > 0 {
|
||||
conf.TLS = &dynamic.TLSConfiguration{
|
||||
Certificates: slices.Collect(maps.Values(uniqCerts)),
|
||||
}
|
||||
}
|
||||
|
||||
return conf, ingressStatuses
|
||||
}
|
||||
|
||||
// loadCertificates loads the TLS certificates for the given Knative Ingress.
|
||||
// This method mutates the uniqCerts map to add the loaded certificates.
|
||||
func (p *Provider) loadCertificates(ctx context.Context, ingress *knativenetworkingv1alpha1.Ingress, uniqCerts map[string]*tls.CertAndStores) error {
|
||||
for _, t := range ingress.Spec.TLS {
|
||||
// TODO: maybe this could be allowed with an allowCrossNamespace option in the future.
|
||||
if t.SecretNamespace != ingress.Namespace {
|
||||
log.Ctx(ctx).Debug().Msg("TLS secret namespace has to be the same as the Ingress one")
|
||||
continue
|
||||
}
|
||||
|
||||
key := ingress.Namespace + "-" + t.SecretName
|
||||
|
||||
// TODO: as specified in the GoDoc we should validate that the certificates contain the configured Hosts.
|
||||
if _, exists := uniqCerts[key]; !exists {
|
||||
cert, err := p.loadCertificate(ingress.Namespace, t.SecretName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("getting certificate: %w", err)
|
||||
}
|
||||
uniqCerts[key] = &tls.CertAndStores{Certificate: cert}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *Provider) loadCertificate(namespace, secretName string) (tls.Certificate, error) {
|
||||
secret, err := p.client.GetSecret(namespace, secretName)
|
||||
if err != nil {
|
||||
return tls.Certificate{}, fmt.Errorf("getting secret %s/%s: %w", namespace, secretName, err)
|
||||
}
|
||||
|
||||
certBytes, hasCert := secret.Data[corev1.TLSCertKey]
|
||||
keyBytes, hasKey := secret.Data[corev1.TLSPrivateKeyKey]
|
||||
|
||||
if (!hasCert || len(certBytes) == 0) || (!hasKey || len(keyBytes) == 0) {
|
||||
return tls.Certificate{}, errors.New("secret does not contain a keypair")
|
||||
}
|
||||
|
||||
return tls.Certificate{
|
||||
CertFile: types.FileOrContent(certBytes),
|
||||
KeyFile: types.FileOrContent(keyBytes),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (p *Provider) buildRouters(ctx context.Context, ingress *knativenetworkingv1alpha1.Ingress) *dynamic.HTTPConfiguration {
|
||||
logger := log.Ctx(ctx).With().Logger()
|
||||
|
||||
conf := &dynamic.HTTPConfiguration{
|
||||
Routers: make(map[string]*dynamic.Router),
|
||||
Middlewares: make(map[string]*dynamic.Middleware),
|
||||
Services: make(map[string]*dynamic.Service),
|
||||
}
|
||||
|
||||
for ri, rule := range ingress.Spec.Rules {
|
||||
if rule.HTTP == nil {
|
||||
logger.Debug().Msgf("No HTTP rule defined for rule %d in Ingress %s", ri, ingress.Name)
|
||||
continue
|
||||
}
|
||||
|
||||
entrypoints := p.PublicEntrypoints
|
||||
if rule.Visibility == knativenetworkingv1alpha1.IngressVisibilityClusterLocal {
|
||||
if p.PrivateEntrypoints == nil {
|
||||
// Skip route creation as no internal entrypoints are defined for cluster local visibility.
|
||||
continue
|
||||
}
|
||||
entrypoints = p.PrivateEntrypoints
|
||||
}
|
||||
|
||||
// TODO: support rewrite host
|
||||
for pi, path := range rule.HTTP.Paths {
|
||||
routerKey := fmt.Sprintf("%s-%s-rule-%d-path-%d", ingress.Namespace, ingress.Name, ri, pi)
|
||||
router := &dynamic.Router{
|
||||
EntryPoints: entrypoints,
|
||||
Rule: buildRule(rule.Hosts, path.Headers, path.Path),
|
||||
Middlewares: make([]string, 0),
|
||||
Service: routerKey + "-wrr",
|
||||
}
|
||||
|
||||
if len(path.AppendHeaders) > 0 {
|
||||
midKey := fmt.Sprintf("%s-append-headers", routerKey)
|
||||
|
||||
router.Middlewares = append(router.Middlewares, midKey)
|
||||
conf.Middlewares[midKey] = &dynamic.Middleware{
|
||||
Headers: &dynamic.Headers{
|
||||
CustomRequestHeaders: path.AppendHeaders,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
wrr, services, err := p.buildWeightedRoundRobin(routerKey, path.Splits)
|
||||
if err != nil {
|
||||
logger.Error().Err(err).Msg("Error building weighted round robin")
|
||||
continue
|
||||
}
|
||||
|
||||
// TODO: support Ingress#HTTPOption to check if HTTP router should redirect to the HTTPS one.
|
||||
conf.Routers[routerKey] = router
|
||||
|
||||
// TODO: at some point we should allow to define a default TLS secret at the provider level to enable TLS with a custom cert when external-domain-tls is disabled.
|
||||
// see https://knative.dev/docs/serving/encryption/external-domain-tls/#manually-obtain-and-renew-certificates
|
||||
if len(ingress.Spec.TLS) > 0 {
|
||||
conf.Routers[routerKey+"-tls"] = &dynamic.Router{
|
||||
EntryPoints: router.EntryPoints,
|
||||
Rule: router.Rule, // TODO: maybe the rule should be a new one containing the TLS hosts injected by Knative.
|
||||
Middlewares: router.Middlewares,
|
||||
Service: router.Service,
|
||||
TLS: &dynamic.RouterTLSConfig{},
|
||||
}
|
||||
}
|
||||
|
||||
conf.Services[routerKey+"-wrr"] = &dynamic.Service{Weighted: wrr}
|
||||
for k, v := range services {
|
||||
conf.Services[k] = v
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return conf
|
||||
}
|
||||
|
||||
func (p *Provider) buildWeightedRoundRobin(routerKey string, splits []knativenetworkingv1alpha1.IngressBackendSplit) (*dynamic.WeightedRoundRobin, map[string]*dynamic.Service, error) {
|
||||
wrr := &dynamic.WeightedRoundRobin{
|
||||
Services: make([]dynamic.WRRService, 0),
|
||||
}
|
||||
|
||||
services := make(map[string]*dynamic.Service)
|
||||
for si, split := range splits {
|
||||
serviceKey := fmt.Sprintf("%s-split-%d", routerKey, si)
|
||||
|
||||
var err error
|
||||
services[serviceKey], err = p.buildService(split.ServiceNamespace, split.ServiceName, split.ServicePort)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("building service: %w", err)
|
||||
}
|
||||
|
||||
// As described in the spec if there is only one split it defaults to 100.
|
||||
percent := split.Percent
|
||||
if len(splits) == 1 {
|
||||
percent = 100
|
||||
}
|
||||
|
||||
wrr.Services = append(wrr.Services, dynamic.WRRService{
|
||||
Name: serviceKey,
|
||||
Weight: ptr.To(percent),
|
||||
Headers: split.AppendHeaders,
|
||||
})
|
||||
}
|
||||
|
||||
return wrr, services, nil
|
||||
}
|
||||
|
||||
func (p *Provider) buildService(namespace, serviceName string, port intstr.IntOrString) (*dynamic.Service, error) {
|
||||
servers, err := p.buildServers(namespace, serviceName, port)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("building servers: %w", err)
|
||||
}
|
||||
|
||||
var lb dynamic.ServersLoadBalancer
|
||||
lb.SetDefaults()
|
||||
lb.Servers = servers
|
||||
|
||||
return &dynamic.Service{LoadBalancer: &lb}, nil
|
||||
}
|
||||
|
||||
func (p *Provider) buildServers(namespace, serviceName string, port intstr.IntOrString) ([]dynamic.Server, error) {
|
||||
service, err := p.client.GetService(namespace, serviceName)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("getting service %s/%s: %w", namespace, serviceName, err)
|
||||
}
|
||||
|
||||
var svcPort *corev1.ServicePort
|
||||
for _, p := range service.Spec.Ports {
|
||||
if p.Name == port.String() || strconv.Itoa(int(p.Port)) == port.String() {
|
||||
svcPort = &p
|
||||
break
|
||||
}
|
||||
}
|
||||
if svcPort == nil {
|
||||
return nil, errors.New("service port not found")
|
||||
}
|
||||
|
||||
if service.Spec.ClusterIP == "" {
|
||||
return nil, errors.New("service does not have a ClusterIP")
|
||||
}
|
||||
|
||||
scheme := "http"
|
||||
if svcPort.AppProtocol != nil && *svcPort.AppProtocol == knativenetworking.AppProtocolH2C {
|
||||
scheme = "h2c"
|
||||
}
|
||||
|
||||
hostPort := net.JoinHostPort(service.Spec.ClusterIP, strconv.Itoa(int(svcPort.Port)))
|
||||
return []dynamic.Server{{URL: fmt.Sprintf("%s://%s", scheme, hostPort)}}, nil
|
||||
}
|
||||
|
||||
// updateKnativeIngressStatus reports the Traefik load-balancer endpoints back
// into the Knative Ingress status and marks the ingress networking as
// configured, so the Knative controller can consider the route reconciled.
func (p *Provider) updateKnativeIngressStatus(ctx context.Context, ingress *knativenetworkingv1alpha1.Ingress) error {
	log.Ctx(ctx).Debug().Msgf("Updating status for Ingress %s/%s", ingress.Namespace, ingress.Name)

	// Public (cluster-external) load-balancer endpoint, when configured on the provider.
	var publicLbs []knativenetworkingv1alpha1.LoadBalancerIngressStatus
	if p.PublicService.Name != "" && p.PublicService.Namespace != "" {
		publicLbs = append(publicLbs, knativenetworkingv1alpha1.LoadBalancerIngressStatus{
			DomainInternal: network.GetServiceHostname(p.PublicService.Name, p.PublicService.Namespace),
		})
	}

	// Private (cluster-local) load-balancer endpoint, when configured on the provider.
	var privateLbs []knativenetworkingv1alpha1.LoadBalancerIngressStatus
	if p.PrivateService.Name != "" && p.PrivateService.Namespace != "" {
		privateLbs = append(privateLbs, knativenetworkingv1alpha1.LoadBalancerIngressStatus{
			DomainInternal: network.GetServiceHostname(p.PrivateService.Name, p.PrivateService.Namespace),
		})
	}

	// Only issue a status update when needed: the status is missing, the network
	// is not yet marked configured, or the observed generation lags behind the
	// current one. This avoids repeated no-op API updates.
	if ingress.GetStatus() == nil || !ingress.GetStatus().GetCondition(knativenetworkingv1alpha1.IngressConditionNetworkConfigured).IsTrue() || ingress.GetGeneration() != ingress.GetStatus().ObservedGeneration {
		ingress.Status.MarkNetworkConfigured()
		ingress.Status.MarkLoadBalancerReady(publicLbs, privateLbs)
		ingress.Status.ObservedGeneration = ingress.GetGeneration()

		return p.client.UpdateIngressStatus(ingress)
	}
	return nil
}
|
||||
|
||||
func buildRule(hosts []string, headers map[string]knativenetworkingv1alpha1.HeaderMatch, path string) string {
|
||||
var operands []string
|
||||
|
||||
if len(hosts) > 0 {
|
||||
var hostRules []string
|
||||
for _, host := range hosts {
|
||||
hostRules = append(hostRules, fmt.Sprintf("Host(`%v`)", host))
|
||||
}
|
||||
operands = append(operands, fmt.Sprintf("(%s)", strings.Join(hostRules, " || ")))
|
||||
}
|
||||
|
||||
if len(headers) > 0 {
|
||||
headerKeys := slices.Collect(maps.Keys(headers))
|
||||
slices.Sort(headerKeys)
|
||||
|
||||
var headerRules []string
|
||||
for _, key := range headerKeys {
|
||||
headerRules = append(headerRules, fmt.Sprintf("Header(`%s`,`%s`)", key, headers[key].Exact))
|
||||
}
|
||||
operands = append(operands, fmt.Sprintf("(%s)", strings.Join(headerRules, " && ")))
|
||||
}
|
||||
|
||||
if len(path) > 0 {
|
||||
operands = append(operands, fmt.Sprintf("PathPrefix(`%s`)", path))
|
||||
}
|
||||
|
||||
return strings.Join(operands, " && ")
|
||||
}
|
||||
|
||||
func mergeHTTPConfigs(confs ...*dynamic.HTTPConfiguration) *dynamic.HTTPConfiguration {
|
||||
conf := &dynamic.HTTPConfiguration{
|
||||
Routers: map[string]*dynamic.Router{},
|
||||
Middlewares: map[string]*dynamic.Middleware{},
|
||||
Services: map[string]*dynamic.Service{},
|
||||
}
|
||||
|
||||
for _, c := range confs {
|
||||
for k, v := range c.Routers {
|
||||
conf.Routers[k] = v
|
||||
}
|
||||
for k, v := range c.Middlewares {
|
||||
conf.Middlewares[k] = v
|
||||
}
|
||||
for k, v := range c.Services {
|
||||
conf.Services[k] = v
|
||||
}
|
||||
}
|
||||
|
||||
return conf
|
||||
}
|
||||
|
||||
// throttleEvents wraps eventsChan with a single-element buffer so that event
// producers never block while the consumer is being throttled: at most one
// pending event is retained and surplus events are dropped (every event
// triggers the same kind of refresh, so only "something happened" matters).
// It returns nil when throttling is disabled (throttleDuration == 0), in
// which case the caller should consume eventsChan directly.
func throttleEvents(ctx context.Context, throttleDuration time.Duration, pool *safe.Pool, eventsChan <-chan interface{}) chan interface{} {
	logger := log.Ctx(ctx).With().Logger()
	if throttleDuration == 0 {
		return nil
	}
	// Create a buffered channel to hold the pending event (if we're delaying processing the event due to throttling)
	eventsChanBuffered := make(chan interface{}, 1)

	// Run a goroutine that reads events from eventChan and does a non-blocking write to pendingEvent.
	// This guarantees that writing to eventChan will never block,
	// and that pendingEvent will have something in it if there's been an event since we read from that channel.
	pool.GoCtx(func(ctxPool context.Context) {
		for {
			select {
			case <-ctxPool.Done():
				return
			case nextEvent := <-eventsChan:
				select {
				case eventsChanBuffered <- nextEvent:
				default:
					// We already have an event in eventsChanBuffered, so we'll do a refresh as soon as our throttle allows us to.
					// It's fine to drop the event and keep whatever's in the buffer -- we don't do different things for different events
					logger.Debug().Msgf("Dropping event kind %T due to throttling", nextEvent)
				}
			}
		}
	})

	return eventsChanBuffered
}
|
478
pkg/provider/kubernetes/knative/kubernetes_test.go
Normal file
478
pkg/provider/kubernetes/knative/kubernetes_test.go
Normal file
@ -0,0 +1,478 @@
|
||||
package knative
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"github.com/traefik/paerser/types"
|
||||
"github.com/traefik/traefik/v3/pkg/config/dynamic"
|
||||
"github.com/traefik/traefik/v3/pkg/provider/kubernetes/k8s"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
kubefake "k8s.io/client-go/kubernetes/fake"
|
||||
kscheme "k8s.io/client-go/kubernetes/scheme"
|
||||
"k8s.io/utils/ptr"
|
||||
knativenetworkingv1alpha1 "knative.dev/networking/pkg/apis/networking/v1alpha1"
|
||||
knfake "knative.dev/networking/pkg/client/clientset/versioned/fake"
|
||||
)
|
||||
|
||||
// init registers the Knative networking API types with the client-go scheme,
// so the YAML test fixtures can be decoded.
func init() {
	// required by k8s.MustParseYaml
	if err := knativenetworkingv1alpha1.AddToScheme(kscheme.Scheme); err != nil {
		panic(err)
	}
}
|
||||
|
||||
// Test_loadConfiguration checks the dynamic configuration produced by the
// Knative provider from ingress/service fixtures: ingresses with a foreign
// ingress class are ignored, cluster-local ingresses are bound to the private
// entrypoints, external ingresses to the public ones, and TLS ingresses get an
// additional TLS-enabled router.
func Test_loadConfiguration(t *testing.T) {
	testCases := []struct {
		desc    string
		paths   []string
		want    *dynamic.Configuration
		wantLen int
	}{
		{
			desc:    "Wrong ingress class",
			paths:   []string{"wrong_ingress_class.yaml"},
			wantLen: 0,
			want: &dynamic.Configuration{
				HTTP: &dynamic.HTTPConfiguration{
					Routers:     map[string]*dynamic.Router{},
					Services:    map[string]*dynamic.Service{},
					Middlewares: map[string]*dynamic.Middleware{},
				},
			},
		},
		{
			desc:    "Cluster Local",
			paths:   []string{"cluster_local.yaml", "services.yaml"},
			wantLen: 1,
			want: &dynamic.Configuration{
				HTTP: &dynamic.HTTPConfiguration{
					// Cluster-local visibility binds the router to the private entrypoints.
					Routers: map[string]*dynamic.Router{
						"default-helloworld-go-rule-0-path-0": {
							EntryPoints: []string{"priv-http", "priv-https"},
							Service:     "default-helloworld-go-rule-0-path-0-wrr",
							Rule:        "(Host(`helloworld-go.default`) || Host(`helloworld-go.default.svc`) || Host(`helloworld-go.default.svc.cluster.local`))",
							Middlewares: []string{},
						},
					},
					Services: map[string]*dynamic.Service{
						"default-helloworld-go-rule-0-path-0-split-0": {
							LoadBalancer: &dynamic.ServersLoadBalancer{
								Strategy:       "wrr",
								PassHostHeader: ptr.To(true),
								ResponseForwarding: &dynamic.ResponseForwarding{
									FlushInterval: types.Duration(100 * time.Millisecond),
								},
								Servers: []dynamic.Server{
									{
										URL: "http://10.43.38.208:80",
									},
								},
							},
						},
						"default-helloworld-go-rule-0-path-0-split-1": {
							LoadBalancer: &dynamic.ServersLoadBalancer{
								Strategy:       "wrr",
								PassHostHeader: ptr.To(true),
								ResponseForwarding: &dynamic.ResponseForwarding{
									FlushInterval: types.Duration(100 * time.Millisecond),
								},
								Servers: []dynamic.Server{
									{
										URL: "http://10.43.44.18:80",
									},
								},
							},
						},
						// 50/50 traffic split across the two revisions, each
						// tagged with the Knative revision headers.
						"default-helloworld-go-rule-0-path-0-wrr": {
							Weighted: &dynamic.WeightedRoundRobin{
								Services: []dynamic.WRRService{
									{
										Name:   "default-helloworld-go-rule-0-path-0-split-0",
										Weight: ptr.To(50),
										Headers: map[string]string{
											"Knative-Serving-Namespace": "default",
											"Knative-Serving-Revision":  "helloworld-go-00001",
										},
									},
									{
										Name:   "default-helloworld-go-rule-0-path-0-split-1",
										Weight: ptr.To(50),
										Headers: map[string]string{
											"Knative-Serving-Namespace": "default",
											"Knative-Serving-Revision":  "helloworld-go-00002",
										},
									},
								},
							},
						},
					},
					Middlewares: map[string]*dynamic.Middleware{},
				},
			},
		},
		{
			desc:    "External IP",
			paths:   []string{"external_ip.yaml", "services.yaml"},
			wantLen: 1,
			want: &dynamic.Configuration{
				HTTP: &dynamic.HTTPConfiguration{
					// External visibility binds the router to the public entrypoints.
					Routers: map[string]*dynamic.Router{
						"default-helloworld-go-rule-0-path-0": {
							EntryPoints: []string{"http", "https"},
							Service:     "default-helloworld-go-rule-0-path-0-wrr",
							Rule:        "(Host(`helloworld-go.default`) || Host(`helloworld-go.default.svc`) || Host(`helloworld-go.default.svc.cluster.local`))",
							Middlewares: []string{},
						},
					},
					Services: map[string]*dynamic.Service{
						"default-helloworld-go-rule-0-path-0-split-0": {
							LoadBalancer: &dynamic.ServersLoadBalancer{
								Strategy:       "wrr",
								PassHostHeader: ptr.To(true),
								ResponseForwarding: &dynamic.ResponseForwarding{
									FlushInterval: types.Duration(100 * time.Millisecond),
								},
								Servers: []dynamic.Server{
									{
										URL: "http://10.43.38.208:80",
									},
								},
							},
						},
						"default-helloworld-go-rule-0-path-0-split-1": {
							LoadBalancer: &dynamic.ServersLoadBalancer{
								Strategy:       "wrr",
								PassHostHeader: ptr.To(true),
								ResponseForwarding: &dynamic.ResponseForwarding{
									FlushInterval: types.Duration(100 * time.Millisecond),
								},
								Servers: []dynamic.Server{
									{
										URL: "http://10.43.44.18:80",
									},
								},
							},
						},
						"default-helloworld-go-rule-0-path-0-wrr": {
							Weighted: &dynamic.WeightedRoundRobin{
								Services: []dynamic.WRRService{
									{
										Name:   "default-helloworld-go-rule-0-path-0-split-0",
										Weight: ptr.To(50),
										Headers: map[string]string{
											"Knative-Serving-Namespace": "default",
											"Knative-Serving-Revision":  "helloworld-go-00001",
										},
									},
									{
										Name:   "default-helloworld-go-rule-0-path-0-split-1",
										Weight: ptr.To(50),
										Headers: map[string]string{
											"Knative-Serving-Namespace": "default",
											"Knative-Serving-Revision":  "helloworld-go-00002",
										},
									},
								},
							},
						},
					},
					Middlewares: map[string]*dynamic.Middleware{},
				},
			},
		},
		{
			desc:    "TLS",
			paths:   []string{"tls.yaml", "services.yaml"},
			wantLen: 1,
			want: &dynamic.Configuration{
				HTTP: &dynamic.HTTPConfiguration{
					// A TLS-enabled ingress produces both a plain router and a
					// dedicated "-tls" router carrying the TLS configuration.
					Routers: map[string]*dynamic.Router{
						"default-helloworld-go-rule-0-path-0": {
							EntryPoints: []string{"http", "https"},
							Service:     "default-helloworld-go-rule-0-path-0-wrr",
							Rule:        "(Host(`helloworld-go.default`) || Host(`helloworld-go.default.svc`) || Host(`helloworld-go.default.svc.cluster.local`))",
							Middlewares: []string{},
						},
						"default-helloworld-go-rule-0-path-0-tls": {
							EntryPoints: []string{"http", "https"},
							Service:     "default-helloworld-go-rule-0-path-0-wrr",
							Rule:        "(Host(`helloworld-go.default`) || Host(`helloworld-go.default.svc`) || Host(`helloworld-go.default.svc.cluster.local`))",
							Middlewares: []string{},
							TLS:         &dynamic.RouterTLSConfig{},
						},
					},
					Services: map[string]*dynamic.Service{
						"default-helloworld-go-rule-0-path-0-split-0": {
							LoadBalancer: &dynamic.ServersLoadBalancer{
								Strategy:       "wrr",
								PassHostHeader: ptr.To(true),
								ResponseForwarding: &dynamic.ResponseForwarding{
									FlushInterval: types.Duration(100 * time.Millisecond),
								},
								Servers: []dynamic.Server{
									{
										URL: "http://10.43.38.208:80",
									},
								},
							},
						},
						"default-helloworld-go-rule-0-path-0-split-1": {
							LoadBalancer: &dynamic.ServersLoadBalancer{
								Strategy:       "wrr",
								PassHostHeader: ptr.To(true),
								ResponseForwarding: &dynamic.ResponseForwarding{
									FlushInterval: types.Duration(100 * time.Millisecond),
								},
								Servers: []dynamic.Server{
									{
										URL: "http://10.43.44.18:80",
									},
								},
							},
						},
						"default-helloworld-go-rule-0-path-0-wrr": {
							Weighted: &dynamic.WeightedRoundRobin{
								Services: []dynamic.WRRService{
									{
										Name:   "default-helloworld-go-rule-0-path-0-split-0",
										Weight: ptr.To(50),
										Headers: map[string]string{
											"Knative-Serving-Namespace": "default",
											"Knative-Serving-Revision":  "helloworld-go-00001",
										},
									},
									{
										Name:   "default-helloworld-go-rule-0-path-0-split-1",
										Weight: ptr.To(50),
										Headers: map[string]string{
											"Knative-Serving-Namespace": "default",
											"Knative-Serving-Revision":  "helloworld-go-00002",
										},
									},
								},
							},
						},
					},
					Middlewares: map[string]*dynamic.Middleware{},
				},
			},
		},
	}

	for _, testCase := range testCases {
		t.Run(testCase.desc, func(t *testing.T) {
			t.Parallel()

			k8sObjects, knObjects := readResources(t, testCase.paths)

			// Fake clientsets pre-populated with the fixture objects.
			k8sClient := kubefake.NewClientset(k8sObjects...)
			knClient := knfake.NewSimpleClientset(knObjects...)

			client := newClientImpl(knClient, k8sClient)

			eventCh, err := client.WatchAll(nil, make(chan struct{}))
			require.NoError(t, err)

			if len(k8sObjects) > 0 || len(knObjects) > 0 {
				// just wait for the first event
				<-eventCh
			}

			p := Provider{
				PublicEntrypoints:  []string{"http", "https"},
				PrivateEntrypoints: []string{"priv-http", "priv-https"},
				client:             client,
			}

			got, gotIngresses := p.loadConfiguration(t.Context())
			assert.Len(t, gotIngresses, testCase.wantLen)
			assert.Equal(t, testCase.want, got)
		})
	}
}
|
||||
|
||||
// Test_buildRule checks the Traefik rule expression generated from Knative
// match criteria: hosts are OR-ed, header matches are AND-ed (sorted by key),
// and an optional path is appended as a PathPrefix matcher.
func Test_buildRule(t *testing.T) {
	testCases := []struct {
		desc    string
		hosts   []string
		headers map[string]knativenetworkingv1alpha1.HeaderMatch
		path    string
		want    string
	}{
		{
			desc:  "single host, no headers, no path",
			hosts: []string{"example.com"},
			want:  "(Host(`example.com`))",
		},
		{
			desc:  "multiple hosts, no headers, no path",
			hosts: []string{"example.com", "foo.com"},
			want:  "(Host(`example.com`) || Host(`foo.com`))",
		},
		{
			desc:  "single host, single header, no path",
			hosts: []string{"example.com"},
			headers: map[string]knativenetworkingv1alpha1.HeaderMatch{
				"X-Header": {Exact: "value"},
			},
			want: "(Host(`example.com`)) && (Header(`X-Header`,`value`))",
		},
		{
			desc:  "single host, multiple headers, no path",
			hosts: []string{"example.com"},
			headers: map[string]knativenetworkingv1alpha1.HeaderMatch{
				"X-Header":  {Exact: "value"},
				"X-Header2": {Exact: "value2"},
			},
			want: "(Host(`example.com`)) && (Header(`X-Header`,`value`) && Header(`X-Header2`,`value2`))",
		},
		{
			desc:  "single host, multiple headers, with path",
			hosts: []string{"example.com"},
			headers: map[string]knativenetworkingv1alpha1.HeaderMatch{
				"X-Header":  {Exact: "value"},
				"X-Header2": {Exact: "value2"},
			},
			path: "/foo",
			want: "(Host(`example.com`)) && (Header(`X-Header`,`value`) && Header(`X-Header2`,`value2`)) && PathPrefix(`/foo`)",
		},
		{
			desc:  "single host, no headers, with path",
			hosts: []string{"example.com"},
			path:  "/foo",
			want:  "(Host(`example.com`)) && PathPrefix(`/foo`)",
		},
	}

	for _, test := range testCases {
		t.Run(test.desc, func(t *testing.T) {
			t.Parallel()

			got := buildRule(test.hosts, test.headers, test.path)
			assert.Equal(t, test.want, got)
		})
	}
}
|
||||
|
||||
// Test_mergeHTTPConfigs checks that multiple HTTP configurations are merged
// into one, with routers, middlewares and services from all inputs combined.
func Test_mergeHTTPConfigs(t *testing.T) {
	testCases := []struct {
		desc    string
		configs []*dynamic.HTTPConfiguration
		want    *dynamic.HTTPConfiguration
	}{
		{
			// Merging with an empty configuration must leave the other intact.
			desc: "one empty configuration",
			configs: []*dynamic.HTTPConfiguration{
				{
					Routers: map[string]*dynamic.Router{
						"router1": {Rule: "Host(`example.com`)"},
					},
					Middlewares: map[string]*dynamic.Middleware{
						"middleware1": {Headers: &dynamic.Headers{CustomRequestHeaders: map[string]string{"X-Test": "value"}}},
					},
					Services: map[string]*dynamic.Service{
						"service1": {LoadBalancer: &dynamic.ServersLoadBalancer{Servers: []dynamic.Server{{URL: "http://example.com"}}}},
					},
				},
				{
					Routers:     map[string]*dynamic.Router{},
					Middlewares: map[string]*dynamic.Middleware{},
					Services:    map[string]*dynamic.Service{},
				},
			},
			want: &dynamic.HTTPConfiguration{
				Routers: map[string]*dynamic.Router{
					"router1": {Rule: "Host(`example.com`)"},
				},
				Middlewares: map[string]*dynamic.Middleware{
					"middleware1": {Headers: &dynamic.Headers{CustomRequestHeaders: map[string]string{"X-Test": "value"}}},
				},
				Services: map[string]*dynamic.Service{
					"service1": {LoadBalancer: &dynamic.ServersLoadBalancer{Servers: []dynamic.Server{{URL: "http://example.com"}}}},
				},
			},
		},
		{
			// Disjoint keys from both configurations must all be present.
			desc: "merging two non-empty configurations",
			configs: []*dynamic.HTTPConfiguration{
				{
					Routers: map[string]*dynamic.Router{
						"router1": {Rule: "Host(`example.com`)"},
					},
					Middlewares: map[string]*dynamic.Middleware{
						"middleware1": {Headers: &dynamic.Headers{CustomRequestHeaders: map[string]string{"X-Test": "value"}}},
					},
					Services: map[string]*dynamic.Service{
						"service1": {LoadBalancer: &dynamic.ServersLoadBalancer{Servers: []dynamic.Server{{URL: "http://example.com"}}}},
					},
				},
				{
					Routers: map[string]*dynamic.Router{
						"router2": {Rule: "PathPrefix(`/test`)"},
					},
					Middlewares: map[string]*dynamic.Middleware{
						"middleware2": {Headers: &dynamic.Headers{CustomRequestHeaders: map[string]string{"X-Test": "value"}}},
					},
					Services: map[string]*dynamic.Service{
						"service2": {LoadBalancer: &dynamic.ServersLoadBalancer{Servers: []dynamic.Server{{URL: "http://example.com"}}}},
					},
				},
			},
			want: &dynamic.HTTPConfiguration{
				Routers: map[string]*dynamic.Router{
					"router1": {Rule: "Host(`example.com`)"},
					"router2": {Rule: "PathPrefix(`/test`)"},
				},
				Middlewares: map[string]*dynamic.Middleware{
					"middleware1": {Headers: &dynamic.Headers{CustomRequestHeaders: map[string]string{"X-Test": "value"}}},
					"middleware2": {Headers: &dynamic.Headers{CustomRequestHeaders: map[string]string{"X-Test": "value"}}},
				},
				Services: map[string]*dynamic.Service{
					"service1": {LoadBalancer: &dynamic.ServersLoadBalancer{Servers: []dynamic.Server{{URL: "http://example.com"}}}},
					"service2": {LoadBalancer: &dynamic.ServersLoadBalancer{Servers: []dynamic.Server{{URL: "http://example.com"}}}},
				},
			},
		},
	}

	for _, test := range testCases {
		t.Run(test.desc, func(t *testing.T) {
			t.Parallel()

			got := mergeHTTPConfigs(test.configs...)
			assert.Equal(t, test.want, got)
		})
	}
}
|
||||
|
||||
func readResources(t *testing.T, paths []string) ([]runtime.Object, []runtime.Object) {
|
||||
t.Helper()
|
||||
|
||||
var (
|
||||
k8sObjects []runtime.Object
|
||||
knObjects []runtime.Object
|
||||
)
|
||||
for _, path := range paths {
|
||||
yamlContent, err := os.ReadFile(filepath.FromSlash("./fixtures/" + path))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
objects := k8s.MustParseYaml(yamlContent)
|
||||
for _, obj := range objects {
|
||||
switch obj.GetObjectKind().GroupVersionKind().Group {
|
||||
case "networking.internal.knative.dev":
|
||||
knObjects = append(knObjects, obj)
|
||||
default:
|
||||
k8sObjects = append(k8sObjects, obj)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return k8sObjects, knObjects
|
||||
}
|
@ -286,13 +286,13 @@ func (m *Manager) getWRRServiceHandler(ctx context.Context, serviceName string,
|
||||
}
|
||||
|
||||
func (m *Manager) getServiceHandler(ctx context.Context, service dynamic.WRRService) (http.Handler, error) {
|
||||
switch {
|
||||
case service.Status != nil:
|
||||
if service.Status != nil {
|
||||
return http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
|
||||
rw.WriteHeader(*service.Status)
|
||||
}), nil
|
||||
}
|
||||
|
||||
case service.GRPCStatus != nil:
|
||||
if service.GRPCStatus != nil {
|
||||
return http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
|
||||
st := status.New(service.GRPCStatus.Code, service.GRPCStatus.Msg)
|
||||
|
||||
@ -307,10 +307,24 @@ func (m *Manager) getServiceHandler(ctx context.Context, service dynamic.WRRServ
|
||||
|
||||
_, _ = rw.Write(body)
|
||||
}), nil
|
||||
|
||||
default:
|
||||
return m.BuildHTTP(ctx, service.Name)
|
||||
}
|
||||
|
||||
svcHandler, err := m.BuildHTTP(ctx, service.Name)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("building HTTP service: %w", err)
|
||||
}
|
||||
|
||||
if service.Headers != nil {
|
||||
return http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
|
||||
for k, v := range service.Headers {
|
||||
req.Header.Set(k, v)
|
||||
}
|
||||
|
||||
svcHandler.ServeHTTP(rw, req)
|
||||
}), nil
|
||||
}
|
||||
|
||||
return svcHandler, nil
|
||||
}
|
||||
|
||||
func (m *Manager) getHRWServiceHandler(ctx context.Context, serviceName string, config *dynamic.HighestRandomWeight) (http.Handler, error) {
|
||||
|
@ -160,7 +160,7 @@ func TestGetLoadBalancerServiceHandler(t *testing.T) {
|
||||
serviceName: "test",
|
||||
service: &dynamic.ServersLoadBalancer{
|
||||
Strategy: dynamic.BalancerStrategyWRR,
|
||||
PassHostHeader: boolPtr(true),
|
||||
PassHostHeader: pointer(true),
|
||||
Servers: []dynamic.Server{
|
||||
{
|
||||
URL: server1.URL,
|
||||
@ -479,16 +479,6 @@ func Test1xxResponses(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
type serviceBuilderFunc func(ctx context.Context, serviceName string) (http.Handler, error)
|
||||
|
||||
func (s serviceBuilderFunc) BuildHTTP(ctx context.Context, serviceName string) (http.Handler, error) {
|
||||
return s(ctx, serviceName)
|
||||
}
|
||||
|
||||
type internalHandler struct{}
|
||||
|
||||
func (internalHandler) ServeHTTP(_ http.ResponseWriter, _ *http.Request) {}
|
||||
|
||||
func TestManager_ServiceBuilders(t *testing.T) {
|
||||
var internalHandler internalHandler
|
||||
|
||||
@ -605,7 +595,129 @@ func TestMultipleTypeOnBuildHTTP(t *testing.T) {
|
||||
assert.Error(t, err, "cannot create service: multi-types service not supported, consider declaring two different pieces of service instead")
|
||||
}
|
||||
|
||||
func boolPtr(v bool) *bool { return &v }
|
||||
// TestGetServiceHandler_Headers checks that the handler produced by
// getServiceHandler injects the WRRService.Headers into the outgoing request
// (overriding any existing request headers of the same name), and that empty
// or nil header maps leave the request untouched.
func TestGetServiceHandler_Headers(t *testing.T) {
	pb := httputil.NewProxyBuilder(&transportManagerMock{}, nil)

	testCases := []struct {
		desc            string
		service         dynamic.WRRService
		userAgent       string
		expectedHeaders map[string]string
	}{
		{
			desc: "Service with custom headers",
			service: dynamic.WRRService{
				Name: "target-service",
				Headers: map[string]string{
					"X-Custom-Header": "custom-value",
					"X-Service-Type":  "knative-service",
					"Authorization":   "bearer token123",
				},
			},
			userAgent: "test-agent",
			expectedHeaders: map[string]string{
				"X-Custom-Header": "custom-value",
				"X-Service-Type":  "knative-service",
				"Authorization":   "bearer token123",
			},
		},
		{
			desc: "Service with empty headers map",
			service: dynamic.WRRService{
				Name:    "target-service",
				Headers: map[string]string{},
			},
			userAgent:       "test-agent",
			expectedHeaders: map[string]string{},
		},
		{
			desc: "Service with nil headers",
			service: dynamic.WRRService{
				Name:    "target-service",
				Headers: nil,
			},
			userAgent:       "test-agent",
			expectedHeaders: map[string]string{},
		},
		{
			desc: "Service with headers that override existing request headers",
			service: dynamic.WRRService{
				Name: "target-service",
				Headers: map[string]string{
					"User-Agent": "overridden-agent",
					"Accept":     "application/json",
				},
			},
			userAgent: "original-agent",
			expectedHeaders: map[string]string{
				"User-Agent": "overridden-agent",
				"Accept":     "application/json",
			},
		},
	}

	for _, test := range testCases {
		t.Run(test.desc, func(t *testing.T) {
			// Create a test server that will verify the headers are properly set for this specific test case
			testServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
				// Verify expected headers are present
				for key, expectedValue := range test.expectedHeaders {
					actualValue := r.Header.Get(key)
					assert.Equal(t, expectedValue, actualValue, "Header %s should be %s", key, expectedValue)
				}

				w.Header().Set("X-Response", "success")
				w.WriteHeader(http.StatusOK)
			}))
			t.Cleanup(testServer.Close)

			// Create the target service that the WRRService will point to
			targetServiceInfo := &runtime.ServiceInfo{
				Service: &dynamic.Service{
					LoadBalancer: &dynamic.ServersLoadBalancer{
						Strategy: dynamic.BalancerStrategyWRR,
						Servers: []dynamic.Server{
							{URL: testServer.URL},
						},
					},
				},
			}

			// Create a fresh manager for each test case
			sm := NewManager(map[string]*runtime.ServiceInfo{
				"target-service": targetServiceInfo,
			}, nil, nil, &transportManagerMock{}, pb)

			// Get the service handler
			handler, err := sm.getServiceHandler(t.Context(), test.service)
			require.NoError(t, err)
			require.NotNil(t, handler)

			// Create a test request
			req := testhelpers.MustNewRequest(http.MethodGet, "http://test.example.com/path", nil)
			if test.userAgent != "" {
				req.Header.Set("User-Agent", test.userAgent)
			}

			// Execute the request
			recorder := httptest.NewRecorder()
			handler.ServeHTTP(recorder, req)

			// Verify the response was successful
			assert.Equal(t, http.StatusOK, recorder.Code)
		})
	}
}
|
||||
|
||||
// serviceBuilderFunc adapts a plain function to the service-builder interface
// used by the manager in tests.
type serviceBuilderFunc func(ctx context.Context, serviceName string) (http.Handler, error)

// BuildHTTP implements the service-builder interface by delegating to the function itself.
func (s serviceBuilderFunc) BuildHTTP(ctx context.Context, serviceName string) (http.Handler, error) {
	return s(ctx, serviceName)
}
|
||||
|
||||
// internalHandler is a no-op http.Handler used as a stand-in for internal services in tests.
type internalHandler struct{}

func (internalHandler) ServeHTTP(_ http.ResponseWriter, _ *http.Request) {}
|
||||
|
||||
type forwarderMock struct{}
|
||||
|
||||
|
11
webui/src/components/icons/providers/Knative.tsx
Normal file
11
webui/src/components/icons/providers/Knative.tsx
Normal file
@ -0,0 +1,11 @@
|
||||
import { ProviderIconProps } from 'components/icons/providers'
|
||||
|
||||
// Knative provider brand icon, rendered with the inherited text color.
export default function Knative(props: ProviderIconProps) {
  return (
    <svg height="72" width="72" viewBox="-14 0 92 72" xmlns="http://www.w3.org/2000/svg" {...props}>
      {/* SVG attributes must be camelCased in JSX: `stroke-width` is an invalid
          DOM property for React and triggers a runtime warning, so it is
          written as `strokeWidth` here. */}
      <path d="m30.42 7.074 14.142000000000001 6.8100000000000005 -2.745 4.752000000000001a0.804 0.804 0 0 0 -0.096 0.546l1.821 10.323a0.789 0.789 0 0 0 0.279 0.48l8.028 6.735c0.14400000000000002 0.123 0.33 0.192 0.522 0.192h5.6339999999999995l1.521 6.66a1.476 1.476 0 0 1 -0.28500000000000003 1.2449999999999999l-15.711 19.701a1.4729999999999999 1.4729999999999999 0 0 1 -1.149 0.552h-25.200000000000003a1.4729999999999999 1.4729999999999999 0 0 1 -1.149 -0.552L0.321 44.817a1.476 1.476 0 0 1 -0.28500000000000003 -1.2449999999999999l5.607 -24.567a1.482 1.482 0 0 1 0.798 -0.9990000000000001l22.701 -10.932a1.47 1.47 0 0 1 1.278 0ZM21.732 49.878h5.001v-7.286999999999999l1.92 -2.3520000000000003 5.466 9.639h5.8950000000000005l-7.782 -12.818999999999999 7.386000000000001 -9.507h-6.195l-5.067 7.419c-0.498 0.795 -1.026 1.59 -1.524 2.4509999999999996h-0.099v-9.870000000000001H21.732v22.326ZM57.842999999999996 7.055999999999999l8.925 3.2489999999999997c0.162 0.06 0.29700000000000004 0.17400000000000002 0.384 0.324l4.749 8.225999999999999c0.08700000000000001 0.15000000000000002 0.11699999999999999 0.324 0.08700000000000001 0.495l-1.6500000000000001 9.354a0.729 0.729 0 0 1 -0.249 0.43499999999999994l-7.2780000000000005 6.105a0.735 0.735 0 0 1 -0.471 0.17400000000000002h-9.498a0.738 0.738 0 0 1 -0.474 -0.17400000000000002l-7.2749999999999995 -6.105a0.72 0.72 0 0 1 -0.252 -0.43499999999999994l-1.6500000000000001 -9.354a0.732 0.732 0 0 1 0.08700000000000001 -0.495l4.749 -8.225999999999999a0.735 0.735 0 0 1 0.387 -0.324l8.925 -3.2489999999999997a0.729 0.729 0 0 1 0.504 0Zm-2.13 10.212c-0.096 -0.276 -0.29400000000000004 -0.41100000000000003 -0.591 -0.41100000000000003h-1.4609999999999999V25.71h2.37V19.347c0.264 -0.258 0.54 -0.45899999999999996 0.8340000000000001 -0.6000000000000001a2.082 2.082 0 0 1 0.9359999999999999 -0.21599999999999997c0.44699999999999995 0 0.783 0.135 1.014 0.40800000000000003 0.22799999999999998 0.273 0.342 0.654 0.342 1.146V25.71h2.361V20.085c0 -0.492 -0.063 -0.9450000000000001 -0.192 -1.356a2.964 2.964 0 0 0 -0.5760000000000001 -1.065 2.625 2.625 0 0 0 -0.9390000000000001 -0.6960000000000001 3.6239999999999997 3.6239999999999997 0 0 0 -2.0909999999999997 -0.162 3.5279999999999996 3.5279999999999996 0 0 0 -1.308 0.609 5.868 5.868 0 0 0 -0.552 0.471l-0.14700000000000002 -0.618Z"
        fill="currentColor"
        strokeWidth="3"/>
    </svg>
  )
}
|
@ -8,6 +8,7 @@ import File from 'components/icons/providers/File'
|
||||
import Http from 'components/icons/providers/Http'
|
||||
import Hub from 'components/icons/providers/Hub'
|
||||
import Internal from 'components/icons/providers/Internal'
|
||||
import Knative from "components/icons/providers/Knative";
|
||||
import Kubernetes from 'components/icons/providers/Kubernetes'
|
||||
import Nomad from 'components/icons/providers/Nomad'
|
||||
import Plugin from 'components/icons/providers/Plugin'
|
||||
@ -49,6 +50,9 @@ export default function ProviderIcon({ name, size = 32 }: { name: string; size?:
|
||||
if (['kubernetes'].some((prefix) => nameLowerCase.startsWith(prefix))) {
|
||||
return Kubernetes
|
||||
}
|
||||
if (['knative'].some((prefix) => nameLowerCase.startsWith(prefix))) {
|
||||
return Knative
|
||||
}
|
||||
if (['nomad', 'nomad-'].some((prefix) => nameLowerCase.startsWith(prefix))) {
|
||||
return Nomad
|
||||
}
|
||||
|
Loading…
x
Reference in New Issue
Block a user