diff --git a/docs/content/reference/dynamic-configuration/docker-labels.yml b/docs/content/reference/dynamic-configuration/docker-labels.yml index 42bbed1da..b2881a594 100644 --- a/docs/content/reference/dynamic-configuration/docker-labels.yml +++ b/docs/content/reference/dynamic-configuration/docker-labels.yml @@ -199,39 +199,39 @@ - "traefik.http.routers.router1.tls.domains[1].main=foobar" - "traefik.http.routers.router1.tls.domains[1].sans=foobar, foobar" - "traefik.http.routers.router1.tls.options=foobar" -- "traefik.http.services.service02.loadbalancer.healthcheck.followredirects=true" -- "traefik.http.services.service02.loadbalancer.healthcheck.headers.name0=foobar" -- "traefik.http.services.service02.loadbalancer.healthcheck.headers.name1=foobar" -- "traefik.http.services.service02.loadbalancer.healthcheck.hostname=foobar" -- "traefik.http.services.service02.loadbalancer.healthcheck.interval=42s" -- "traefik.http.services.service02.loadbalancer.healthcheck.method=foobar" -- "traefik.http.services.service02.loadbalancer.healthcheck.mode=foobar" -- "traefik.http.services.service02.loadbalancer.healthcheck.path=foobar" -- "traefik.http.services.service02.loadbalancer.healthcheck.port=42" -- "traefik.http.services.service02.loadbalancer.healthcheck.scheme=foobar" -- "traefik.http.services.service02.loadbalancer.healthcheck.status=42" -- "traefik.http.services.service02.loadbalancer.healthcheck.timeout=42s" -- "traefik.http.services.service02.loadbalancer.healthcheck.unhealthyinterval=42s" -- "traefik.http.services.service02.loadbalancer.passhostheader=true" -- "traefik.http.services.service02.loadbalancer.passivehealthcheck.failurewindow=42s" -- "traefik.http.services.service02.loadbalancer.passivehealthcheck.maxfailedattempts=42" -- "traefik.http.services.service02.loadbalancer.responseforwarding.flushinterval=42s" -- "traefik.http.services.service02.loadbalancer.serverstransport=foobar" -- "traefik.http.services.service02.loadbalancer.sticky=true" -- "traefik.http.services.service02.loadbalancer.sticky.cookie=true" -- "traefik.http.services.service02.loadbalancer.sticky.cookie.domain=foobar" -- "traefik.http.services.service02.loadbalancer.sticky.cookie.httponly=true" -- "traefik.http.services.service02.loadbalancer.sticky.cookie.maxage=42" -- "traefik.http.services.service02.loadbalancer.sticky.cookie.name=foobar" -- "traefik.http.services.service02.loadbalancer.sticky.cookie.path=foobar" -- "traefik.http.services.service02.loadbalancer.sticky.cookie.samesite=foobar" -- "traefik.http.services.service02.loadbalancer.sticky.cookie.secure=true" -- "traefik.http.services.service02.loadbalancer.strategy=foobar" -- "traefik.http.services.service02.loadbalancer.server.port=foobar" -- "traefik.http.services.service02.loadbalancer.server.preservepath=true" -- "traefik.http.services.service02.loadbalancer.server.scheme=foobar" -- "traefik.http.services.service02.loadbalancer.server.url=foobar" -- "traefik.http.services.service02.loadbalancer.server.weight=42" +- "traefik.http.services.service03.loadbalancer.healthcheck.followredirects=true" +- "traefik.http.services.service03.loadbalancer.healthcheck.headers.name0=foobar" +- "traefik.http.services.service03.loadbalancer.healthcheck.headers.name1=foobar" +- "traefik.http.services.service03.loadbalancer.healthcheck.hostname=foobar" +- "traefik.http.services.service03.loadbalancer.healthcheck.interval=42s" +- "traefik.http.services.service03.loadbalancer.healthcheck.method=foobar" +- 
"traefik.http.services.service03.loadbalancer.healthcheck.mode=foobar" +- "traefik.http.services.service03.loadbalancer.healthcheck.path=foobar" +- "traefik.http.services.service03.loadbalancer.healthcheck.port=42" +- "traefik.http.services.service03.loadbalancer.healthcheck.scheme=foobar" +- "traefik.http.services.service03.loadbalancer.healthcheck.status=42" +- "traefik.http.services.service03.loadbalancer.healthcheck.timeout=42s" +- "traefik.http.services.service03.loadbalancer.healthcheck.unhealthyinterval=42s" +- "traefik.http.services.service03.loadbalancer.passhostheader=true" +- "traefik.http.services.service03.loadbalancer.passivehealthcheck.failurewindow=42s" +- "traefik.http.services.service03.loadbalancer.passivehealthcheck.maxfailedattempts=42" +- "traefik.http.services.service03.loadbalancer.responseforwarding.flushinterval=42s" +- "traefik.http.services.service03.loadbalancer.serverstransport=foobar" +- "traefik.http.services.service03.loadbalancer.sticky=true" +- "traefik.http.services.service03.loadbalancer.sticky.cookie=true" +- "traefik.http.services.service03.loadbalancer.sticky.cookie.domain=foobar" +- "traefik.http.services.service03.loadbalancer.sticky.cookie.httponly=true" +- "traefik.http.services.service03.loadbalancer.sticky.cookie.maxage=42" +- "traefik.http.services.service03.loadbalancer.sticky.cookie.name=foobar" +- "traefik.http.services.service03.loadbalancer.sticky.cookie.path=foobar" +- "traefik.http.services.service03.loadbalancer.sticky.cookie.samesite=foobar" +- "traefik.http.services.service03.loadbalancer.sticky.cookie.secure=true" +- "traefik.http.services.service03.loadbalancer.strategy=foobar" +- "traefik.http.services.service03.loadbalancer.server.port=foobar" +- "traefik.http.services.service03.loadbalancer.server.preservepath=true" +- "traefik.http.services.service03.loadbalancer.server.scheme=foobar" +- "traefik.http.services.service03.loadbalancer.server.url=foobar" +- "traefik.http.services.service03.loadbalancer.server.weight=42" - "traefik.tcp.middlewares.tcpmiddleware01.ipallowlist.sourcerange=foobar, foobar" - "traefik.tcp.middlewares.tcpmiddleware02.ipwhitelist.sourcerange=foobar, foobar" - "traefik.tcp.middlewares.tcpmiddleware03.inflightconn.amount=42" diff --git a/docs/content/reference/dynamic-configuration/file.toml b/docs/content/reference/dynamic-configuration/file.toml index 14d9753ce..48ad2a9d2 100644 --- a/docs/content/reference/dynamic-configuration/file.toml +++ b/docs/content/reference/dynamic-configuration/file.toml @@ -55,12 +55,23 @@ fallback = "foobar" [http.services.Service01.failover.healthCheck] [http.services.Service02] - [http.services.Service02.loadBalancer] + [http.services.Service02.highestRandomWeight] + + [[http.services.Service02.highestRandomWeight.services]] + name = "foobar" + weight = 42 + + [[http.services.Service02.highestRandomWeight.services]] + name = "foobar" + weight = 42 + [http.services.Service02.highestRandomWeight.healthCheck] + [http.services.Service03] + [http.services.Service03.loadBalancer] strategy = "foobar" passHostHeader = true serversTransport = "foobar" - [http.services.Service02.loadBalancer.sticky] - [http.services.Service02.loadBalancer.sticky.cookie] + [http.services.Service03.loadBalancer.sticky] + [http.services.Service03.loadBalancer.sticky.cookie] name = "foobar" secure = true httpOnly = true @@ -69,16 +80,16 @@ path = "foobar" domain = "foobar" - [[http.services.Service02.loadBalancer.servers]] + [[http.services.Service03.loadBalancer.servers]] url = "foobar" weight = 42 
preservePath = true - [[http.services.Service02.loadBalancer.servers]] + [[http.services.Service03.loadBalancer.servers]] url = "foobar" weight = 42 preservePath = true - [http.services.Service02.loadBalancer.healthCheck] + [http.services.Service03.loadBalancer.healthCheck] scheme = "foobar" mode = "foobar" path = "foobar" @@ -90,40 +101,40 @@ timeout = "42s" hostname = "foobar" followRedirects = true - [http.services.Service02.loadBalancer.healthCheck.headers] + [http.services.Service03.loadBalancer.healthCheck.headers] name0 = "foobar" name1 = "foobar" - [http.services.Service02.loadBalancer.passiveHealthCheck] + [http.services.Service03.loadBalancer.passiveHealthCheck] failureWindow = "42s" maxFailedAttempts = 42 - [http.services.Service02.loadBalancer.responseForwarding] + [http.services.Service03.loadBalancer.responseForwarding] flushInterval = "42s" - [http.services.Service03] - [http.services.Service03.mirroring] + [http.services.Service04] + [http.services.Service04.mirroring] service = "foobar" mirrorBody = true maxBodySize = 42 - [[http.services.Service03.mirroring.mirrors]] + [[http.services.Service04.mirroring.mirrors]] name = "foobar" percent = 42 - [[http.services.Service03.mirroring.mirrors]] + [[http.services.Service04.mirroring.mirrors]] name = "foobar" percent = 42 - [http.services.Service03.mirroring.healthCheck] - [http.services.Service04] - [http.services.Service04.weighted] + [http.services.Service04.mirroring.healthCheck] + [http.services.Service05] + [http.services.Service05.weighted] - [[http.services.Service04.weighted.services]] + [[http.services.Service05.weighted.services]] name = "foobar" weight = 42 - [[http.services.Service04.weighted.services]] + [[http.services.Service05.weighted.services]] name = "foobar" weight = 42 - [http.services.Service04.weighted.sticky] - [http.services.Service04.weighted.sticky.cookie] + [http.services.Service05.weighted.sticky] + [http.services.Service05.weighted.sticky.cookie] name = "foobar" secure = true httpOnly = true @@ -131,7 +142,7 @@ maxAge = 42 path = "foobar" domain = "foobar" - [http.services.Service04.weighted.healthCheck] + [http.services.Service05.weighted.healthCheck] [http.middlewares] [http.middlewares.Middleware01] [http.middlewares.Middleware01.addPrefix] diff --git a/docs/content/reference/dynamic-configuration/file.yaml b/docs/content/reference/dynamic-configuration/file.yaml index 99a79e291..ab2574c50 100644 --- a/docs/content/reference/dynamic-configuration/file.yaml +++ b/docs/content/reference/dynamic-configuration/file.yaml @@ -65,6 +65,14 @@ http: fallback: foobar healthCheck: {} Service02: + highestRandomWeight: + services: + - name: foobar + weight: 42 + - name: foobar + weight: 42 + healthCheck: {} + Service03: loadBalancer: sticky: cookie: @@ -105,7 +113,7 @@ http: responseForwarding: flushInterval: 42s serversTransport: foobar - Service03: + Service04: mirroring: service: foobar mirrorBody: true @@ -116,7 +124,7 @@ http: - name: foobar percent: 42 healthCheck: {} - Service04: + Service05: weighted: services: - name: foobar diff --git a/docs/content/reference/dynamic-configuration/kv-ref.md b/docs/content/reference/dynamic-configuration/kv-ref.md index 8c0737abb..a6fad6ee9 100644 --- a/docs/content/reference/dynamic-configuration/kv-ref.md +++ b/docs/content/reference/dynamic-configuration/kv-ref.md @@ -274,58 +274,63 @@ THIS FILE MUST NOT BE EDITED BY HAND | `traefik/http/services/Service01/failover/fallback` | `foobar` | | `traefik/http/services/Service01/failover/healthCheck` | `` | | 
`traefik/http/services/Service01/failover/service` | `foobar` | -| `traefik/http/services/Service02/loadBalancer/healthCheck/followRedirects` | `true` | -| `traefik/http/services/Service02/loadBalancer/healthCheck/headers/name0` | `foobar` | -| `traefik/http/services/Service02/loadBalancer/healthCheck/headers/name1` | `foobar` | -| `traefik/http/services/Service02/loadBalancer/healthCheck/hostname` | `foobar` | -| `traefik/http/services/Service02/loadBalancer/healthCheck/interval` | `42s` | -| `traefik/http/services/Service02/loadBalancer/healthCheck/method` | `foobar` | -| `traefik/http/services/Service02/loadBalancer/healthCheck/mode` | `foobar` | -| `traefik/http/services/Service02/loadBalancer/healthCheck/path` | `foobar` | -| `traefik/http/services/Service02/loadBalancer/healthCheck/port` | `42` | -| `traefik/http/services/Service02/loadBalancer/healthCheck/scheme` | `foobar` | -| `traefik/http/services/Service02/loadBalancer/healthCheck/status` | `42` | -| `traefik/http/services/Service02/loadBalancer/healthCheck/timeout` | `42s` | -| `traefik/http/services/Service02/loadBalancer/healthCheck/unhealthyInterval` | `42s` | -| `traefik/http/services/Service02/loadBalancer/passHostHeader` | `true` | -| `traefik/http/services/Service02/loadBalancer/passiveHealthCheck/failureWindow` | `42s` | -| `traefik/http/services/Service02/loadBalancer/passiveHealthCheck/maxFailedAttempts` | `42` | -| `traefik/http/services/Service02/loadBalancer/responseForwarding/flushInterval` | `42s` | -| `traefik/http/services/Service02/loadBalancer/servers/0/preservePath` | `true` | -| `traefik/http/services/Service02/loadBalancer/servers/0/url` | `foobar` | -| `traefik/http/services/Service02/loadBalancer/servers/0/weight` | `42` | -| `traefik/http/services/Service02/loadBalancer/servers/1/preservePath` | `true` | -| `traefik/http/services/Service02/loadBalancer/servers/1/url` | `foobar` | -| `traefik/http/services/Service02/loadBalancer/servers/1/weight` | `42` | -| `traefik/http/services/Service02/loadBalancer/serversTransport` | `foobar` | -| `traefik/http/services/Service02/loadBalancer/sticky/cookie/domain` | `foobar` | -| `traefik/http/services/Service02/loadBalancer/sticky/cookie/httpOnly` | `true` | -| `traefik/http/services/Service02/loadBalancer/sticky/cookie/maxAge` | `42` | -| `traefik/http/services/Service02/loadBalancer/sticky/cookie/name` | `foobar` | -| `traefik/http/services/Service02/loadBalancer/sticky/cookie/path` | `foobar` | -| `traefik/http/services/Service02/loadBalancer/sticky/cookie/sameSite` | `foobar` | -| `traefik/http/services/Service02/loadBalancer/sticky/cookie/secure` | `true` | -| `traefik/http/services/Service02/loadBalancer/strategy` | `foobar` | -| `traefik/http/services/Service03/mirroring/healthCheck` | `` | -| `traefik/http/services/Service03/mirroring/maxBodySize` | `42` | -| `traefik/http/services/Service03/mirroring/mirrorBody` | `true` | -| `traefik/http/services/Service03/mirroring/mirrors/0/name` | `foobar` | -| `traefik/http/services/Service03/mirroring/mirrors/0/percent` | `42` | -| `traefik/http/services/Service03/mirroring/mirrors/1/name` | `foobar` | -| `traefik/http/services/Service03/mirroring/mirrors/1/percent` | `42` | -| `traefik/http/services/Service03/mirroring/service` | `foobar` | -| `traefik/http/services/Service04/weighted/healthCheck` | `` | -| `traefik/http/services/Service04/weighted/services/0/name` | `foobar` | -| `traefik/http/services/Service04/weighted/services/0/weight` | `42` | -| `traefik/http/services/Service04/weighted/services/1/name` | 
`foobar` | -| `traefik/http/services/Service04/weighted/services/1/weight` | `42` | -| `traefik/http/services/Service04/weighted/sticky/cookie/domain` | `foobar` | -| `traefik/http/services/Service04/weighted/sticky/cookie/httpOnly` | `true` | -| `traefik/http/services/Service04/weighted/sticky/cookie/maxAge` | `42` | -| `traefik/http/services/Service04/weighted/sticky/cookie/name` | `foobar` | -| `traefik/http/services/Service04/weighted/sticky/cookie/path` | `foobar` | -| `traefik/http/services/Service04/weighted/sticky/cookie/sameSite` | `foobar` | -| `traefik/http/services/Service04/weighted/sticky/cookie/secure` | `true` | +| `traefik/http/services/Service02/highestRandomWeight/healthCheck` | `` | +| `traefik/http/services/Service02/highestRandomWeight/services/0/name` | `foobar` | +| `traefik/http/services/Service02/highestRandomWeight/services/0/weight` | `42` | +| `traefik/http/services/Service02/highestRandomWeight/services/1/name` | `foobar` | +| `traefik/http/services/Service02/highestRandomWeight/services/1/weight` | `42` | +| `traefik/http/services/Service03/loadBalancer/healthCheck/followRedirects` | `true` | +| `traefik/http/services/Service03/loadBalancer/healthCheck/headers/name0` | `foobar` | +| `traefik/http/services/Service03/loadBalancer/healthCheck/headers/name1` | `foobar` | +| `traefik/http/services/Service03/loadBalancer/healthCheck/hostname` | `foobar` | +| `traefik/http/services/Service03/loadBalancer/healthCheck/interval` | `42s` | +| `traefik/http/services/Service03/loadBalancer/healthCheck/method` | `foobar` | +| `traefik/http/services/Service03/loadBalancer/healthCheck/mode` | `foobar` | +| `traefik/http/services/Service03/loadBalancer/healthCheck/path` | `foobar` | +| `traefik/http/services/Service03/loadBalancer/healthCheck/port` | `42` | +| `traefik/http/services/Service03/loadBalancer/healthCheck/scheme` | `foobar` | +| `traefik/http/services/Service03/loadBalancer/healthCheck/status` | `42` | +| `traefik/http/services/Service03/loadBalancer/healthCheck/timeout` | `42s` | +| `traefik/http/services/Service03/loadBalancer/healthCheck/unhealthyInterval` | `42s` | +| `traefik/http/services/Service03/loadBalancer/passHostHeader` | `true` | +| `traefik/http/services/Service03/loadBalancer/passiveHealthCheck/failureWindow` | `42s` | +| `traefik/http/services/Service03/loadBalancer/passiveHealthCheck/maxFailedAttempts` | `42` | +| `traefik/http/services/Service03/loadBalancer/responseForwarding/flushInterval` | `42s` | +| `traefik/http/services/Service03/loadBalancer/servers/0/preservePath` | `true` | +| `traefik/http/services/Service03/loadBalancer/servers/0/url` | `foobar` | +| `traefik/http/services/Service03/loadBalancer/servers/0/weight` | `42` | +| `traefik/http/services/Service03/loadBalancer/servers/1/preservePath` | `true` | +| `traefik/http/services/Service03/loadBalancer/servers/1/url` | `foobar` | +| `traefik/http/services/Service03/loadBalancer/servers/1/weight` | `42` | +| `traefik/http/services/Service03/loadBalancer/serversTransport` | `foobar` | +| `traefik/http/services/Service03/loadBalancer/sticky/cookie/domain` | `foobar` | +| `traefik/http/services/Service03/loadBalancer/sticky/cookie/httpOnly` | `true` | +| `traefik/http/services/Service03/loadBalancer/sticky/cookie/maxAge` | `42` | +| `traefik/http/services/Service03/loadBalancer/sticky/cookie/name` | `foobar` | +| `traefik/http/services/Service03/loadBalancer/sticky/cookie/path` | `foobar` | +| `traefik/http/services/Service03/loadBalancer/sticky/cookie/sameSite` | `foobar` | +| 
`traefik/http/services/Service03/loadBalancer/sticky/cookie/secure` | `true` |
+| `traefik/http/services/Service03/loadBalancer/strategy` | `foobar` |
+| `traefik/http/services/Service04/mirroring/healthCheck` | `` |
+| `traefik/http/services/Service04/mirroring/maxBodySize` | `42` |
+| `traefik/http/services/Service04/mirroring/mirrorBody` | `true` |
+| `traefik/http/services/Service04/mirroring/mirrors/0/name` | `foobar` |
+| `traefik/http/services/Service04/mirroring/mirrors/0/percent` | `42` |
+| `traefik/http/services/Service04/mirroring/mirrors/1/name` | `foobar` |
+| `traefik/http/services/Service04/mirroring/mirrors/1/percent` | `42` |
+| `traefik/http/services/Service04/mirroring/service` | `foobar` |
+| `traefik/http/services/Service05/weighted/healthCheck` | `` |
+| `traefik/http/services/Service05/weighted/services/0/name` | `foobar` |
+| `traefik/http/services/Service05/weighted/services/0/weight` | `42` |
+| `traefik/http/services/Service05/weighted/services/1/name` | `foobar` |
+| `traefik/http/services/Service05/weighted/services/1/weight` | `42` |
+| `traefik/http/services/Service05/weighted/sticky/cookie/domain` | `foobar` |
+| `traefik/http/services/Service05/weighted/sticky/cookie/httpOnly` | `true` |
+| `traefik/http/services/Service05/weighted/sticky/cookie/maxAge` | `42` |
+| `traefik/http/services/Service05/weighted/sticky/cookie/name` | `foobar` |
+| `traefik/http/services/Service05/weighted/sticky/cookie/path` | `foobar` |
+| `traefik/http/services/Service05/weighted/sticky/cookie/sameSite` | `foobar` |
+| `traefik/http/services/Service05/weighted/sticky/cookie/secure` | `true` |
 | `traefik/tcp/middlewares/TCPMiddleware01/ipAllowList/sourceRange/0` | `foobar` |
 | `traefik/tcp/middlewares/TCPMiddleware01/ipAllowList/sourceRange/1` | `foobar` |
 | `traefik/tcp/middlewares/TCPMiddleware02/ipWhiteList/sourceRange/0` | `foobar` |
diff --git a/docs/content/routing/services/index.md b/docs/content/routing/services/index.md
index fddf2c953..4afcd9e46 100644
--- a/docs/content/routing/services/index.md
+++ b/docs/content/routing/services/index.md
@@ -175,6 +175,7 @@ Two load balancing algorithms are supported:
 
 - Weighed round-robin (wrr)
 - Power of two choices (p2c)
+- Highest Random Weight (hrw)
 
 ##### WRR
 
@@ -242,6 +243,34 @@ Power of two choices algorithm is a load balancing strategy that selects two ser
         url = "http://private-ip-server-3/"
     ```
 
+##### HRW
+
+Highest Random Weight (HRW), also called Rendezvous hashing, load balances clients across a pool of services or servers, consistently routing a given client to the same backend as long as the pool does not change.
+
+??? example "Load Balancing with HRW -- Using the [File Provider](../../providers/file.md)"
+
+    ```yaml tab="YAML"
+    ## Dynamic configuration
+    http:
+      services:
+        my-service:
+          loadBalancer:
+            strategy: "hrw"
+            servers:
+            - url: "http://private-ip-server-1/"
+            - url: "http://private-ip-server-2/"
+    ```
+
+    ```toml tab="TOML"
+    ## Dynamic configuration
+    [http.services]
+      [http.services.my-service.loadBalancer]
+        strategy = "hrw"
+        [[http.services.my-service.loadBalancer.servers]]
+          url = "http://private-ip-server-1/"
+        [[http.services.my-service.loadBalancer.servers]]
+          url = "http://private-ip-server-2/"
+    ```
+
 #### Sticky sessions
 
 When sticky sessions are enabled, a `Set-Cookie` header is set on the initial response to let the client know which server handles the first response.
@@ -1253,6 +1282,147 @@ http:
         url = "http://private-ip-server-2/"
     ```
 
+### Highest Random Weight (service)
+
+The HRW service is able to load balance requests between multiple services based on weights, while consistently routing a given client to the same child service (Rendezvous hashing).
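+
+For intuition, selection works roughly like the `getNodeScore` helper introduced by this PR: each child gets a score derived from an FNV hash of the client source (its IP) combined with the child's name, scaled by the child's weight, and the child with the highest score wins. The following standalone Go sketch illustrates the idea (function and variable names are illustrative only, not part of the Traefik API):
+
+```go
+package main
+
+import (
+	"fmt"
+	"hash/fnv"
+	"math"
+)
+
+// score computes a weighted rendezvous (HRW) score for one client/child pair.
+func score(client, child string, weight float64) float64 {
+	h := fnv.New64a()
+	h.Write([]byte(client + child))
+	// Map the 64-bit hash into (0, 1), then apply the weighted rendezvous transform.
+	x := float64(h.Sum64()) / math.Pow(2, 64)
+	return weight / -math.Log(x)
+}
+
+func main() {
+	children := map[string]float64{"appv1": 3, "appv2": 1}
+	for _, client := range []string{"10.0.0.1", "10.0.0.2"} {
+		best, bestScore := "", math.Inf(-1)
+		for name, weight := range children {
+			if s := score(client, name, weight); s > bestScore {
+				best, bestScore = name, s
+			}
+		}
+		// A given client is always routed to the same child while the set of children is unchanged.
+		fmt.Println(client, "->", best)
+	}
+}
+```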
+
+This strategy is only available to load balance between [services](./index.md) and not between [servers](./index.md#servers). To use HRW between the servers of a single service, use the `hrw` load balancing strategy described above instead.
+
+!!! info "Supported Providers"
+
+    This strategy can be defined currently with the [File](../../providers/file.md) or [IngressRoute](../../providers/kubernetes-crd.md) providers.
+
+```yaml tab="YAML"
+## Dynamic configuration
+http:
+  services:
+    app:
+      highestRandomWeight:
+        services:
+        - name: appv1
+          weight: 3
+        - name: appv2
+          weight: 1
+
+    appv1:
+      loadBalancer:
+        strategy: "hrw"
+        servers:
+        - url: "http://private-ip-server-1/"
+
+    appv2:
+      loadBalancer:
+        strategy: "hrw"
+        servers:
+        - url: "http://private-ip-server-2/"
+```
+
+```toml tab="TOML"
+## Dynamic configuration
+[http.services]
+  [http.services.app]
+    [[http.services.app.highestRandomWeight.services]]
+      name = "appv1"
+      weight = 3
+    [[http.services.app.highestRandomWeight.services]]
+      name = "appv2"
+      weight = 1
+
+  [http.services.appv1]
+    [http.services.appv1.loadBalancer]
+      strategy = "hrw"
+      [[http.services.appv1.loadBalancer.servers]]
+        url = "http://private-ip-server-1/"
+
+  [http.services.appv2]
+    [http.services.appv2.loadBalancer]
+      strategy = "hrw"
+      [[http.services.appv2.loadBalancer.servers]]
+        url = "http://private-ip-server-2/"
+```
+
+#### Health Check
+
+HealthCheck enables automatic self-healthcheck for this service, i.e. whenever
+one of its children is reported as down, this service becomes aware of it, and
+takes it into account (i.e. it ignores the down child) when running the
+load-balancing algorithm. In addition, if the parent of this service also has
+HealthCheck enabled, this service reports to its parent any status change.
+
+!!! info "All or nothing"
+
+    If HealthCheck is enabled for a given service, but any of its descendants does
+    not have it enabled, the creation of the service will fail.
+
+    HealthCheck on HRW services can be defined currently only with the [File](../../providers/file.md) provider.
+
+```yaml tab="YAML"
+## Dynamic configuration
+http:
+  services:
+    app:
+      highestRandomWeight:
+        healthCheck: {}
+        services:
+        - name: appv1
+          weight: 3
+        - name: appv2
+          weight: 1
+
+    appv1:
+      loadBalancer:
+        strategy: "hrw"
+        healthCheck:
+          path: /health
+          interval: 10s
+          timeout: 3s
+        servers:
+        - url: "http://private-ip-server-1/"
+
+    appv2:
+      loadBalancer:
+        strategy: "hrw"
+        healthCheck:
+          path: /health
+          interval: 10s
+          timeout: 3s
+        servers:
+        - url: "http://private-ip-server-2/"
+```
+
+```toml tab="TOML"
+## Dynamic configuration
+[http.services]
+  [http.services.app]
+    [http.services.app.highestRandomWeight.healthCheck]
+    [[http.services.app.highestRandomWeight.services]]
+      name = "appv1"
+      weight = 3
+    [[http.services.app.highestRandomWeight.services]]
+      name = "appv2"
+      weight = 1
+
+  [http.services.appv1]
+    [http.services.appv1.loadBalancer]
+      strategy = "hrw"
+      [http.services.appv1.loadBalancer.healthCheck]
+        path = "/health"
+        interval = "10s"
+        timeout = "3s"
+      [[http.services.appv1.loadBalancer.servers]]
+        url = "http://private-ip-server-1/"
+
+  [http.services.appv2]
+    [http.services.appv2.loadBalancer]
+      strategy = "hrw"
+      [http.services.appv2.loadBalancer.healthCheck]
+        path = "/health"
+        interval = "10s"
+        timeout = "3s"
+      [[http.services.appv2.loadBalancer.servers]]
+        url = "http://private-ip-server-2/"
+```
+
 ### Mirroring (service)
 
 The mirroring is able to mirror requests sent to a service to other services.
diff --git a/pkg/config/dynamic/http_config.go b/pkg/config/dynamic/http_config.go index a60f59713..4fbf06d58 100644 --- a/pkg/config/dynamic/http_config.go +++ b/pkg/config/dynamic/http_config.go @@ -53,10 +53,11 @@ type Model struct { // Service holds a service configuration (can only be of one type at the same time). type Service struct { - LoadBalancer *ServersLoadBalancer `json:"loadBalancer,omitempty" toml:"loadBalancer,omitempty" yaml:"loadBalancer,omitempty" export:"true"` - Weighted *WeightedRoundRobin `json:"weighted,omitempty" toml:"weighted,omitempty" yaml:"weighted,omitempty" label:"-" export:"true"` - Mirroring *Mirroring `json:"mirroring,omitempty" toml:"mirroring,omitempty" yaml:"mirroring,omitempty" label:"-" export:"true"` - Failover *Failover `json:"failover,omitempty" toml:"failover,omitempty" yaml:"failover,omitempty" label:"-" export:"true"` + LoadBalancer *ServersLoadBalancer `json:"loadBalancer,omitempty" toml:"loadBalancer,omitempty" yaml:"loadBalancer,omitempty" export:"true"` + HighestRandomWeight *HighestRandomWeight `json:"highestRandomWeight,omitempty" toml:"highestRandomWeight,omitempty" yaml:"highestRandomWeight,omitempty" label:"-" export:"true"` + Weighted *WeightedRoundRobin `json:"weighted,omitempty" toml:"weighted,omitempty" yaml:"weighted,omitempty" label:"-" export:"true"` + Mirroring *Mirroring `json:"mirroring,omitempty" toml:"mirroring,omitempty" yaml:"mirroring,omitempty" label:"-" export:"true"` + Failover *Failover `json:"failover,omitempty" toml:"failover,omitempty" yaml:"failover,omitempty" label:"-" export:"true"` } // +k8s:deepcopy-gen=true @@ -157,6 +158,19 @@ type WeightedRoundRobin struct { // +k8s:deepcopy-gen=true +// HighestRandomWeight is a weighted sticky load-balancer of services. +type HighestRandomWeight struct { + Services []HRWService `json:"services,omitempty" toml:"services,omitempty" yaml:"services,omitempty" export:"true"` + // HealthCheck enables automatic self-healthcheck for this service, i.e. + // whenever one of its children is reported as down, this service becomes aware of it, + // and takes it into account (i.e. it ignores the down child) when running the + // load-balancing algorithm. In addition, if the parent of this service also has + // HealthCheck enabled, this service reports to its parent any status change. + HealthCheck *HealthCheck `json:"healthCheck,omitempty" toml:"healthCheck,omitempty" yaml:"healthCheck,omitempty" label:"allowEmpty" file:"allowEmpty" kv:"allowEmpty" export:"true"` +} + +// +k8s:deepcopy-gen=true + // WRRService is a reference to a service load-balanced with weighted round-robin. type WRRService struct { Name string `json:"name,omitempty" toml:"name,omitempty" yaml:"name,omitempty" export:"true"` @@ -170,6 +184,20 @@ type WRRService struct { GRPCStatus *GRPCStatus `json:"-" toml:"-" yaml:"-" label:"-" file:"-"` } +// +k8s:deepcopy-gen=true + +// HRWService is a reference to a service load-balanced with highest random weight. +type HRWService struct { + Name string `json:"name,omitempty" toml:"name,omitempty" yaml:"name,omitempty" export:"true"` + Weight *int `json:"weight,omitempty" toml:"weight,omitempty" yaml:"weight,omitempty" export:"true"` +} + +// SetDefaults Default values for a HRWService. +func (w *HRWService) SetDefaults() { + defaultWeight := 1 + w.Weight = &defaultWeight +} + // SetDefaults Default values for a WRRService. 
func (w *WRRService) SetDefaults() {
 	defaultWeight := 1
@@ -231,6 +259,8 @@ const (
 	BalancerStrategyWRR BalancerStrategy = "wrr"
 	// BalancerStrategyP2C is the power of two choices strategy.
 	BalancerStrategyP2C BalancerStrategy = "p2c"
+	// BalancerStrategyHRW is the highest random weight (rendezvous hashing) strategy.
+	BalancerStrategyHRW BalancerStrategy = "hrw"
 )
 
 // +k8s:deepcopy-gen=true
diff --git a/pkg/config/dynamic/zz_generated.deepcopy.go b/pkg/config/dynamic/zz_generated.deepcopy.go
index 94d0f11a8..65425d028 100644
--- a/pkg/config/dynamic/zz_generated.deepcopy.go
+++ b/pkg/config/dynamic/zz_generated.deepcopy.go
@@ -449,6 +449,27 @@ func (in *GrpcWeb) DeepCopy() *GrpcWeb {
 	return out
 }
 
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HRWService) DeepCopyInto(out *HRWService) {
+	*out = *in
+	if in.Weight != nil {
+		in, out := &in.Weight, &out.Weight
+		*out = new(int)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HRWService.
+func (in *HRWService) DeepCopy() *HRWService {
+	if in == nil {
+		return nil
+	}
+	out := new(HRWService)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *HTTPConfiguration) DeepCopyInto(out *HTTPConfiguration) {
 	*out = *in
@@ -688,6 +709,34 @@ func (in *HealthCheck) DeepCopy() *HealthCheck {
 	return out
 }
 
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HighestRandomWeight) DeepCopyInto(out *HighestRandomWeight) {
+	*out = *in
+	if in.Services != nil {
+		in, out := &in.Services, &out.Services
+		*out = make([]HRWService, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.HealthCheck != nil {
+		in, out := &in.HealthCheck, &out.HealthCheck
+		*out = new(HealthCheck)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HighestRandomWeight.
+func (in *HighestRandomWeight) DeepCopy() *HighestRandomWeight {
+	if in == nil {
+		return nil
+	}
+	out := new(HighestRandomWeight)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *IPAllowList) DeepCopyInto(out *IPAllowList) {
 	*out = *in
@@ -1566,6 +1615,11 @@ func (in *Service) DeepCopyInto(out *Service) {
 		*out = new(ServersLoadBalancer)
 		(*in).DeepCopyInto(*out)
 	}
+	if in.HighestRandomWeight != nil {
+		in, out := &in.HighestRandomWeight, &out.HighestRandomWeight
+		*out = new(HighestRandomWeight)
+		(*in).DeepCopyInto(*out)
+	}
 	if in.Weighted != nil {
 		in, out := &in.Weighted, &out.Weighted
 		*out = new(WeightedRoundRobin)
diff --git a/pkg/server/service/loadbalancer/hrw/hrw.go b/pkg/server/service/loadbalancer/hrw/hrw.go
new file mode 100644
index 000000000..475873ece
--- /dev/null
+++ b/pkg/server/service/loadbalancer/hrw/hrw.go
@@ -0,0 +1,200 @@
+package hrw
+
+import (
+	"context"
+	"errors"
+	"hash/fnv"
+	"math"
+	"net/http"
+	"sync"
+
+	"github.com/rs/zerolog/log"
+	"github.com/traefik/traefik/v3/pkg/config/dynamic"
+	"github.com/traefik/traefik/v3/pkg/ip"
+)
+
+type namedHandler struct {
+	http.Handler
+	name   string
+	weight float64
+}
+
+// Balancer implements the Rendezvous Hashing algorithm for load balancing.
+// The idea is to compute a score for each available backend using a hash of the client's +// source (for example, IP) combined with the backend's identifier, and assign the client +// to the backend with the highest score. This ensures that each client consistently +// connects to the same backend while distributing load evenly across all backends. +type Balancer struct { + wantsHealthCheck bool + + strategy ip.RemoteAddrStrategy + + handlersMu sync.RWMutex + // References all the handlers by name and also by the hashed value of the name. + handlers []*namedHandler + // status is a record of which child services of the Balancer are healthy, keyed + // by name of child service. A service is initially added to the map when it is + // created via Add, and it is later removed or added to the map as needed, + // through the SetStatus method. + status map[string]struct{} + // updaters is the list of hooks that are run (to update the Balancer + // parent(s)), whenever the Balancer status changes. + updaters []func(bool) + // fenced is the list of terminating yet still serving child services. + fenced map[string]struct{} +} + +// New creates a new load balancer. +func New(wantHealthCheck bool) *Balancer { + balancer := &Balancer{ + status: make(map[string]struct{}), + fenced: make(map[string]struct{}), + wantsHealthCheck: wantHealthCheck, + strategy: ip.RemoteAddrStrategy{}, + } + + return balancer +} + +// getNodeScore calculates the score of the couple of src and handler name. +func getNodeScore(handler *namedHandler, src string) float64 { + h := fnv.New64a() + h.Write([]byte(src + handler.name)) + sum := h.Sum64() + score := float64(sum) / math.Pow(2, 64) + logScore := 1.0 / -math.Log(score) + + return logScore * handler.weight +} + +// SetStatus sets on the balancer that its given child is now of the given +// status. balancerName is only needed for logging purposes. +func (b *Balancer) SetStatus(ctx context.Context, childName string, up bool) { + b.handlersMu.Lock() + defer b.handlersMu.Unlock() + + upBefore := len(b.status) > 0 + + status := "DOWN" + if up { + status = "UP" + } + + log.Ctx(ctx).Debug().Msgf("Setting status of %s to %v", childName, status) + + if up { + b.status[childName] = struct{}{} + } else { + delete(b.status, childName) + } + + upAfter := len(b.status) > 0 + status = "DOWN" + if upAfter { + status = "UP" + } + + // No Status Change + if upBefore == upAfter { + // We're still with the same status, no need to propagate + log.Ctx(ctx).Debug().Msgf("Still %s, no need to propagate", status) + return + } + + // Status Change + log.Ctx(ctx).Debug().Msgf("Propagating new %s status", status) + for _, fn := range b.updaters { + fn(upAfter) + } +} + +// RegisterStatusUpdater adds fn to the list of hooks that are run when the +// status of the Balancer changes. +// Not thread safe. 
+func (b *Balancer) RegisterStatusUpdater(fn func(up bool)) error { + if !b.wantsHealthCheck { + return errors.New("healthCheck not enabled in config for this weighted service") + } + b.updaters = append(b.updaters, fn) + + return nil +} + +var errNoAvailableServer = errors.New("no available server") + +func (b *Balancer) nextServer(ip string) (*namedHandler, error) { + b.handlersMu.RLock() + var healthy []*namedHandler + for _, h := range b.handlers { + if _, ok := b.status[h.name]; ok { + if _, fenced := b.fenced[h.name]; !fenced { + healthy = append(healthy, h) + } + } + } + b.handlersMu.RUnlock() + + if len(healthy) == 0 { + return nil, errNoAvailableServer + } + + var handler *namedHandler + score := 0.0 + for _, h := range healthy { + s := getNodeScore(h, ip) + if s > score { + handler = h + score = s + } + } + + log.Debug().Msgf("Service selected by HRW: %s", handler.name) + + return handler, nil +} + +func (b *Balancer) ServeHTTP(w http.ResponseWriter, req *http.Request) { + // give ip fetched to b.nextServer + clientIP := b.strategy.GetIP(req) + log.Debug().Msgf("ServeHTTP() clientIP=%s", clientIP) + + server, err := b.nextServer(clientIP) + if err != nil { + if errors.Is(err, errNoAvailableServer) { + http.Error(w, errNoAvailableServer.Error(), http.StatusServiceUnavailable) + } else { + http.Error(w, err.Error(), http.StatusInternalServerError) + } + return + } + + server.ServeHTTP(w, req) +} + +// AddServer adds a handler with a server. +func (b *Balancer) AddServer(name string, handler http.Handler, server dynamic.Server) { + b.Add(name, handler, server.Weight, server.Fenced) +} + +// Add adds a handler. +// A handler with a non-positive weight is ignored. +func (b *Balancer) Add(name string, handler http.Handler, weight *int, fenced bool) { + w := 1 + if weight != nil { + w = *weight + } + + if w <= 0 { // non-positive weight is meaningless + return + } + + h := &namedHandler{Handler: handler, name: name, weight: float64(w)} + + b.handlersMu.Lock() + b.handlers = append(b.handlers, h) + b.status[name] = struct{}{} + if fenced { + b.fenced[name] = struct{}{} + } + b.handlersMu.Unlock() +} diff --git a/pkg/server/service/loadbalancer/hrw/hrw_test.go b/pkg/server/service/loadbalancer/hrw/hrw_test.go new file mode 100644 index 000000000..d095bc361 --- /dev/null +++ b/pkg/server/service/loadbalancer/hrw/hrw_test.go @@ -0,0 +1,302 @@ +package hrw + +import ( + "context" + "encoding/binary" + "math/rand" + "net" + "net/http" + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/assert" +) + +// genIPAddress generate randomly an IP address as a string. +func genIPAddress() string { + buf := make([]byte, 4) + + ip := rand.Uint32() + + binary.LittleEndian.PutUint32(buf, ip) + ipStr := net.IP(buf) + return ipStr.String() +} + +// initStatusArray initialize an array filled with status value for test assertions. +func initStatusArray(size int, value int) []int { + status := make([]int, 0, size) + for i := 1; i <= size; i++ { + status = append(status, value) + } + return status +} + +// Tests evaluate load balancing of single and multiple clients. 
+// Due to the randomness of IP Adresses, repartition between services is not perfect +// The tests validate repartition using a margin of 10% of the number of requests + +func TestBalancer(t *testing.T) { + balancer := New(false) + + balancer.Add("first", http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + rw.Header().Set("server", "first") + rw.WriteHeader(http.StatusOK) + }), Int(4), false) + + balancer.Add("second", http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + rw.Header().Set("server", "second") + rw.WriteHeader(http.StatusOK) + }), Int(1), false) + + recorder := &responseRecorder{ResponseRecorder: httptest.NewRecorder(), save: map[string]int{}} + req := httptest.NewRequest(http.MethodGet, "/", nil) + for range 100 { + req.RemoteAddr = genIPAddress() + balancer.ServeHTTP(recorder, req) + } + assert.InDelta(t, 80, recorder.save["first"], 10) + assert.InDelta(t, 20, recorder.save["second"], 10) +} + +func TestBalancerNoService(t *testing.T) { + balancer := New(false) + + recorder := httptest.NewRecorder() + balancer.ServeHTTP(recorder, httptest.NewRequest(http.MethodGet, "/", nil)) + + assert.Equal(t, http.StatusServiceUnavailable, recorder.Result().StatusCode) +} + +func TestBalancerOneServerZeroWeight(t *testing.T) { + balancer := New(false) + + balancer.Add("first", http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + rw.Header().Set("server", "first") + rw.WriteHeader(http.StatusOK) + }), Int(1), false) + + balancer.Add("second", http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {}), Int(0), false) + + recorder := &responseRecorder{ResponseRecorder: httptest.NewRecorder(), save: map[string]int{}} + for range 3 { + balancer.ServeHTTP(recorder, httptest.NewRequest(http.MethodGet, "/", nil)) + } + + assert.Equal(t, 3, recorder.save["first"]) +} + +type key string + +const serviceName key = "serviceName" + +func TestBalancerNoServiceUp(t *testing.T) { + balancer := New(false) + + balancer.Add("first", http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + rw.WriteHeader(http.StatusInternalServerError) + }), Int(1), false) + + balancer.Add("second", http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + rw.WriteHeader(http.StatusInternalServerError) + }), Int(1), false) + + balancer.SetStatus(context.WithValue(t.Context(), serviceName, "parent"), "first", false) + balancer.SetStatus(context.WithValue(t.Context(), serviceName, "parent"), "second", false) + + recorder := httptest.NewRecorder() + balancer.ServeHTTP(recorder, httptest.NewRequest(http.MethodGet, "/", nil)) + + assert.Equal(t, http.StatusServiceUnavailable, recorder.Result().StatusCode) +} + +func TestBalancerOneServerDown(t *testing.T) { + balancer := New(false) + + balancer.Add("first", http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + rw.Header().Set("server", "first") + rw.WriteHeader(http.StatusOK) + }), Int(1), false) + + balancer.Add("second", http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + rw.WriteHeader(http.StatusInternalServerError) + }), Int(1), false) + balancer.SetStatus(context.WithValue(t.Context(), serviceName, "parent"), "second", false) + + recorder := &responseRecorder{ResponseRecorder: httptest.NewRecorder(), save: map[string]int{}} + for range 3 { + balancer.ServeHTTP(recorder, httptest.NewRequest(http.MethodGet, "/", nil)) + } + + assert.Equal(t, 3, recorder.save["first"]) +} + +func TestBalancerDownThenUp(t *testing.T) { + balancer := New(false) + + 
balancer.Add("first", http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + rw.Header().Set("server", "first") + rw.WriteHeader(http.StatusOK) + }), Int(1), false) + + balancer.Add("second", http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + rw.Header().Set("server", "second") + rw.WriteHeader(http.StatusOK) + }), Int(1), false) + balancer.SetStatus(context.WithValue(t.Context(), serviceName, "parent"), "second", false) + + recorder := &responseRecorder{ResponseRecorder: httptest.NewRecorder(), save: map[string]int{}} + for range 3 { + balancer.ServeHTTP(recorder, httptest.NewRequest(http.MethodGet, "/", nil)) + } + assert.Equal(t, 3, recorder.save["first"]) + + balancer.SetStatus(context.WithValue(t.Context(), serviceName, "parent"), "second", true) + recorder = &responseRecorder{ResponseRecorder: httptest.NewRecorder(), save: map[string]int{}} + req := httptest.NewRequest(http.MethodGet, "/", nil) + for range 100 { + req.RemoteAddr = genIPAddress() + balancer.ServeHTTP(recorder, req) + } + assert.InDelta(t, 50, recorder.save["first"], 10) + assert.InDelta(t, 50, recorder.save["second"], 10) +} + +func TestBalancerPropagate(t *testing.T) { + balancer1 := New(true) + + balancer1.Add("first", http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + rw.Header().Set("server", "first") + rw.WriteHeader(http.StatusOK) + }), Int(1), false) + balancer1.Add("second", http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + rw.Header().Set("server", "second") + rw.WriteHeader(http.StatusOK) + }), Int(1), false) + + balancer2 := New(true) + balancer2.Add("third", http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + rw.Header().Set("server", "third") + rw.WriteHeader(http.StatusOK) + }), Int(1), false) + balancer2.Add("fourth", http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + rw.Header().Set("server", "fourth") + rw.WriteHeader(http.StatusOK) + }), Int(1), false) + + topBalancer := New(true) + topBalancer.Add("balancer1", balancer1, Int(1), false) + _ = balancer1.RegisterStatusUpdater(func(up bool) { + topBalancer.SetStatus(context.WithValue(t.Context(), serviceName, "top"), "balancer1", up) + // TODO(mpl): if test gets flaky, add channel or something here to signal that + // propagation is done, and wait on it before sending request. + }) + topBalancer.Add("balancer2", balancer2, Int(1), false) + _ = balancer2.RegisterStatusUpdater(func(up bool) { + topBalancer.SetStatus(context.WithValue(t.Context(), serviceName, "top"), "balancer2", up) + }) + + recorder := &responseRecorder{ResponseRecorder: httptest.NewRecorder(), save: map[string]int{}} + req := httptest.NewRequest(http.MethodGet, "/", nil) + for range 100 { + req.RemoteAddr = genIPAddress() + topBalancer.ServeHTTP(recorder, req) + } + assert.InDelta(t, 25, recorder.save["first"], 10) + assert.InDelta(t, 25, recorder.save["second"], 10) + assert.InDelta(t, 25, recorder.save["third"], 10) + assert.InDelta(t, 25, recorder.save["fourth"], 10) + wantStatus := initStatusArray(100, 200) + assert.Equal(t, wantStatus, recorder.status) + + // fourth gets downed, but balancer2 still up since third is still up. 
+ balancer2.SetStatus(context.WithValue(t.Context(), serviceName, "top"), "fourth", false) + recorder = &responseRecorder{ResponseRecorder: httptest.NewRecorder(), save: map[string]int{}} + req = httptest.NewRequest(http.MethodGet, "/", nil) + for range 100 { + req.RemoteAddr = genIPAddress() + topBalancer.ServeHTTP(recorder, req) + } + assert.InDelta(t, 25, recorder.save["first"], 10) + assert.InDelta(t, 25, recorder.save["second"], 10) + assert.InDelta(t, 50, recorder.save["third"], 10) + assert.InDelta(t, 0, recorder.save["fourth"], 0) + wantStatus = initStatusArray(100, 200) + assert.Equal(t, wantStatus, recorder.status) + + // third gets downed, and the propagation triggers balancer2 to be marked as + // down as well for topBalancer. + balancer2.SetStatus(context.WithValue(t.Context(), serviceName, "top"), "third", false) + recorder = &responseRecorder{ResponseRecorder: httptest.NewRecorder(), save: map[string]int{}} + req = httptest.NewRequest(http.MethodGet, "/", nil) + for range 100 { + req.RemoteAddr = genIPAddress() + topBalancer.ServeHTTP(recorder, req) + } + assert.InDelta(t, 50, recorder.save["first"], 10) + assert.InDelta(t, 50, recorder.save["second"], 10) + assert.InDelta(t, 0, recorder.save["third"], 0) + assert.InDelta(t, 0, recorder.save["fourth"], 0) + wantStatus = initStatusArray(100, 200) + assert.Equal(t, wantStatus, recorder.status) +} + +func TestBalancerAllServersZeroWeight(t *testing.T) { + balancer := New(false) + + balancer.Add("test", http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {}), Int(0), false) + balancer.Add("test2", http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {}), Int(0), false) + + recorder := httptest.NewRecorder() + balancer.ServeHTTP(recorder, httptest.NewRequest(http.MethodGet, "/", nil)) + + assert.Equal(t, http.StatusServiceUnavailable, recorder.Result().StatusCode) +} + +func TestSticky(t *testing.T) { + balancer := New(false) + + balancer.Add("first", http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + rw.Header().Set("server", "first") + rw.WriteHeader(http.StatusOK) + }), Int(1), false) + + balancer.Add("second", http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + rw.Header().Set("server", "second") + rw.WriteHeader(http.StatusOK) + }), Int(2), false) + + recorder := &responseRecorder{ResponseRecorder: httptest.NewRecorder(), save: map[string]int{}} + + req := httptest.NewRequest(http.MethodGet, "/", nil) + req.RemoteAddr = genIPAddress() + for range 10 { + for _, cookie := range recorder.Result().Cookies() { + req.AddCookie(cookie) + } + recorder.ResponseRecorder = httptest.NewRecorder() + + balancer.ServeHTTP(recorder, req) + } + + assert.True(t, recorder.save["first"] == 0 || recorder.save["first"] == 10) + assert.True(t, recorder.save["second"] == 0 || recorder.save["second"] == 10) + // from one IP, the choice between server must be the same for the 10 requests + // weight does not impose what would be chosen from 1 client +} + +func Int(v int) *int { return &v } + +type responseRecorder struct { + *httptest.ResponseRecorder + save map[string]int + sequence []string + status []int +} + +func (r *responseRecorder) WriteHeader(statusCode int) { + r.save[r.Header().Get("server")]++ + r.sequence = append(r.sequence, r.Header().Get("server")) + r.status = append(r.status, statusCode) + r.ResponseRecorder.WriteHeader(statusCode) +} diff --git a/pkg/server/service/service.go b/pkg/server/service/service.go index 6967f6e7b..f5e3c8cbf 100644 --- 
a/pkg/server/service/service.go +++ b/pkg/server/service/service.go @@ -28,6 +28,7 @@ import ( "github.com/traefik/traefik/v3/pkg/server/middleware" "github.com/traefik/traefik/v3/pkg/server/provider" "github.com/traefik/traefik/v3/pkg/server/service/loadbalancer/failover" + "github.com/traefik/traefik/v3/pkg/server/service/loadbalancer/hrw" "github.com/traefik/traefik/v3/pkg/server/service/loadbalancer/mirror" "github.com/traefik/traefik/v3/pkg/server/service/loadbalancer/p2c" "github.com/traefik/traefik/v3/pkg/server/service/loadbalancer/wrr" @@ -137,6 +138,13 @@ func (m *Manager) BuildHTTP(rootCtx context.Context, serviceName string) (http.H conf.AddError(err, true) return nil, err } + case conf.HighestRandomWeight != nil: + var err error + lb, err = m.getHRWServiceHandler(ctx, serviceName, conf.HighestRandomWeight) + if err != nil { + conf.AddError(err, true) + return nil, err + } case conf.Mirroring != nil: var err error lb, err = m.getMirrorServiceHandler(ctx, conf.Mirroring) @@ -305,6 +313,40 @@ func (m *Manager) getServiceHandler(ctx context.Context, service dynamic.WRRServ } } +func (m *Manager) getHRWServiceHandler(ctx context.Context, serviceName string, config *dynamic.HighestRandomWeight) (http.Handler, error) { + // TODO Handle accesslog and metrics with multiple service name + balancer := hrw.New(config.HealthCheck != nil) + for _, service := range shuffle(config.Services, m.rand) { + serviceHandler, err := m.BuildHTTP(ctx, service.Name) + if err != nil { + return nil, err + } + + balancer.Add(service.Name, serviceHandler, service.Weight, false) + + if config.HealthCheck == nil { + continue + } + + childName := service.Name + updater, ok := serviceHandler.(healthcheck.StatusUpdater) + if !ok { + return nil, fmt.Errorf("child service %v of %v not a healthcheck.StatusUpdater (%T)", childName, serviceName, serviceHandler) + } + + if err := updater.RegisterStatusUpdater(func(up bool) { + balancer.SetStatus(ctx, childName, up) + }); err != nil { + return nil, fmt.Errorf("cannot register %v as updater for %v: %w", childName, serviceName, err) + } + + log.Ctx(ctx).Debug().Str("parent", serviceName).Str("child", childName). + Msg("Child service will update parent on status change") + } + + return balancer, nil +} + type serverBalancer interface { http.Handler healthcheck.StatusSetter @@ -346,6 +388,8 @@ func (m *Manager) getLoadBalancerServiceHandler(ctx context.Context, serviceName lb = wrr.New(service.Sticky, service.HealthCheck != nil) case dynamic.BalancerStrategyP2C: lb = p2c.New(service.Sticky, service.HealthCheck != nil) + case dynamic.BalancerStrategyHRW: + lb = hrw.New(service.HealthCheck != nil) default: return nil, fmt.Errorf("unsupported load-balancer strategy %q", service.Strategy) }