package wrr

import (
	"container/heap"
	"context"
	"errors"
	"fmt"
	"net/http"
	"sync"

	"github.com/traefik/traefik/v2/pkg/config/dynamic"
	"github.com/traefik/traefik/v2/pkg/log"
)

type namedHandler struct {
	http.Handler
	name     string
	weight   float64
	deadline float64
}

type stickyCookie struct {
	name     string
	secure   bool
	httpOnly bool
}

// Balancer is a WeightedRoundRobin load balancer based on Earliest Deadline First (EDF).
// (https://en.wikipedia.org/wiki/Earliest_deadline_first_scheduling)
// Each pick from the schedule selects the entry with the earliest deadline.
// Entries have deadlines set at currentDeadline + 1 / weight,
// providing weighted round robin behavior with floating point weights and an O(log n) pick time.
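// For example (illustrative numbers, not from the original source): with one entry of
// weight 3 and one of weight 1, their deadlines advance by 1/3 and 1 per pick
// respectively, so out of every four consecutive picks the weight-3 entry is
// selected three times and the weight-1 entry once.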
type Balancer struct {
	stickyCookie     *stickyCookie
	wantsHealthCheck bool

	mutex       sync.RWMutex
	handlers    []*namedHandler
	curDeadline float64
	// status is a record of which child services of the Balancer are healthy, keyed
	// by name of child service. A service is initially added to the map when it is
	// created via AddService, and it is later removed or added to the map as needed,
	// through the SetStatus method.
	status map[string]struct{}
	// updaters is the list of hooks that are run (to update the Balancer
	// parent(s)), whenever the Balancer status changes.
	updaters []func(bool)
}

// New creates a new load balancer.
func New(sticky *dynamic.Sticky, hc *dynamic.HealthCheck) *Balancer {
	balancer := &Balancer{
		status:           make(map[string]struct{}),
		wantsHealthCheck: hc != nil,
	}
	if sticky != nil && sticky.Cookie != nil {
		balancer.stickyCookie = &stickyCookie{
			name:     sticky.Cookie.Name,
			secure:   sticky.Cookie.Secure,
			httpOnly: sticky.Cookie.HTTPOnly,
		}
	}
	return balancer
}

// Len implements heap.Interface/sort.Interface.
func (b *Balancer) Len() int { return len(b.handlers) }

// Less implements heap.Interface/sort.Interface.
func (b *Balancer) Less(i, j int) bool {
	return b.handlers[i].deadline < b.handlers[j].deadline
}

// Swap implements heap.Interface/sort.Interface.
func (b *Balancer) Swap(i, j int) {
	b.handlers[i], b.handlers[j] = b.handlers[j], b.handlers[i]
}

// Push implements heap.Interface for pushing an item into the heap.
func (b *Balancer) Push(x interface{}) {
	h, ok := x.(*namedHandler)
	if !ok {
		return
	}

	b.handlers = append(b.handlers, h)
}

// Pop implements heap.Interface for popping an item from the heap.
// It panics if b.Len() < 1.
func (b *Balancer) Pop() interface{} {
	h := b.handlers[len(b.handlers)-1]
	b.handlers = b.handlers[0 : len(b.handlers)-1]
	return h
}

// SetStatus sets on the balancer that its given child is now of the given status.
func (b *Balancer) SetStatus(ctx context.Context, childName string, up bool) {
	b.mutex.Lock()
	defer b.mutex.Unlock()

	upBefore := len(b.status) > 0

	status := "DOWN"
	if up {
		status = "UP"
	}
	log.FromContext(ctx).Debugf("Setting status of %s to %v", childName, status)
	if up {
		b.status[childName] = struct{}{}
	} else {
		delete(b.status, childName)
	}

	upAfter := len(b.status) > 0
	status = "DOWN"
	if upAfter {
		status = "UP"
	}

	// No status change.
	if upBefore == upAfter {
		// We're still at the same status, no need to propagate.
		log.FromContext(ctx).Debugf("Still %s, no need to propagate", status)
		return
	}

	// Status change.
	log.FromContext(ctx).Debugf("Propagating new %s status", status)
	for _, fn := range b.updaters {
		fn(upAfter)
	}
}

// RegisterStatusUpdater adds fn to the list of hooks that are run when the
// status of the Balancer changes.
// Not thread safe.
func (b *Balancer) RegisterStatusUpdater(fn func(up bool)) error {
	if !b.wantsHealthCheck {
		return errors.New("healthCheck not enabled in config for this weighted service")
	}
	b.updaters = append(b.updaters, fn)
	return nil
}
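
// exampleRegisterUpdater is an illustrative sketch, not part of the original file:
// it shows how a parent might subscribe to this Balancer's aggregated health status.
// The &dynamic.HealthCheck{} literal is an assumption made here only to enable
// status propagation (New only checks that hc is non-nil).
func exampleRegisterUpdater(parentSetStatus func(up bool)) (*Balancer, error) {
	b := New(nil, &dynamic.HealthCheck{})

	// The hook fires whenever the Balancer flips between "at least one child up"
	// and "all children down" (see SetStatus above).
	if err := b.RegisterStatusUpdater(parentSetStatus); err != nil {
		return nil, err
	}

	return b, nil
}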

var errNoAvailableServer = errors.New("no available server")

func (b *Balancer) nextServer() (*namedHandler, error) {
	b.mutex.Lock()
	defer b.mutex.Unlock()

	if len(b.handlers) == 0 {
		return nil, fmt.Errorf("no servers in the pool")
	}
	if len(b.status) == 0 {
		return nil, errNoAvailableServer
	}

	var handler *namedHandler
	for {
		// Pick the handler with the closest deadline.
		handler = heap.Pop(b).(*namedHandler)

		// curDeadline should be the handler's deadline so that newly added entries
		// compete fairly with the existing ones.
		b.curDeadline = handler.deadline
		handler.deadline += 1 / handler.weight

		heap.Push(b, handler)
		if _, ok := b.status[handler.name]; ok {
			break
		}
	}

	log.WithoutContext().Debugf("Service selected by WRR: %s", handler.name)
	return handler, nil
}

func (b *Balancer) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	if b.stickyCookie != nil {
		cookie, err := req.Cookie(b.stickyCookie.name)

		if err != nil && !errors.Is(err, http.ErrNoCookie) {
			log.WithoutContext().Warnf("Error while reading cookie: %v", err)
		}

		if err == nil && cookie != nil {
			for _, handler := range b.handlers {
				if handler.name != cookie.Value {
					continue
				}

				b.mutex.RLock()
				_, ok := b.status[handler.name]
				b.mutex.RUnlock()
				if !ok {
					// The handler matching the cookie is not healthy. Stop searching:
					// we are already in the only iteration that matches the cookie, so
					// none of the following iterations would match it anyway.
					break
				}

				handler.ServeHTTP(w, req)
				return
			}
		}
	}

	server, err := b.nextServer()
	if err != nil {
		if errors.Is(err, errNoAvailableServer) {
			http.Error(w, errNoAvailableServer.Error(), http.StatusServiceUnavailable)
		} else {
			http.Error(w, err.Error(), http.StatusInternalServerError)
		}
		return
	}

	if b.stickyCookie != nil {
		cookie := &http.Cookie{Name: b.stickyCookie.name, Value: server.name, Path: "/", HttpOnly: b.stickyCookie.httpOnly, Secure: b.stickyCookie.secure}
		http.SetCookie(w, cookie)
	}

	server.ServeHTTP(w, req)
}
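
// Sticky-session behavior above, illustrated (example cookie value, not from the
// original source): a client that previously received "Set-Cookie: <name>=v1"
// keeps being routed to the "v1" child while it is healthy; if that child is down
// or unknown, the request falls through to the WRR pick and a fresh cookie is set
// for the newly selected child.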

// AddService adds a handler.
// A handler with a non-positive weight is ignored.
func (b *Balancer) AddService(name string, handler http.Handler, weight *int) {
	w := 1
	if weight != nil {
		w = *weight
	}
	if w <= 0 { // non-positive weight is meaningless
		return
	}

	h := &namedHandler{Handler: handler, name: name, weight: float64(w)}

	b.mutex.Lock()
	h.deadline = b.curDeadline + 1/h.weight
	heap.Push(b, h)
	b.status[name] = struct{}{}
	b.mutex.Unlock()
}
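
// newExampleBalancer is an illustrative sketch, not part of the original file:
// it shows how the balancer might be assembled and used as an http.Handler.
// The service names, weights, and handlers below are assumptions for
// demonstration only.
func newExampleBalancer() http.Handler {
	b := New(nil, nil) // no sticky cookie, no health check

	// Weighted roughly 3:1 in favor of "v1"; a nil weight defaults to 1.
	weight := 3
	b.AddService("v1", http.HandlerFunc(func(rw http.ResponseWriter, _ *http.Request) {
		_, _ = rw.Write([]byte("v1"))
	}), &weight)
	b.AddService("v2", http.HandlerFunc(func(rw http.ResponseWriter, _ *http.Request) {
		_, _ = rw.Write([]byte("v2"))
	}), nil)

	// Each request served through b is routed to the healthy child with the
	// earliest EDF deadline (or to the child named by the sticky cookie, when enabled).
	return b
}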