mirror of https://github.com/cloudnativelabs/kube-router.git (synced 2025-10-03 05:51:08 +02:00)
186 lines · 5.0 KiB · Go

package app

import (
	"errors"
	"os"
	"os/signal"
	"sync"
	"syscall"

	"github.com/cloudnativelabs/kube-router/app/controllers"
	"github.com/cloudnativelabs/kube-router/app/options"
	"github.com/cloudnativelabs/kube-router/app/watchers"
	"github.com/golang/glog"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
)

// KubeRouter holds the information needed to run the server.
type KubeRouter struct {
	Client *kubernetes.Clientset
	Config *options.KubeRouterConfig
}

// NewKubeRouterDefault returns a KubeRouter object with a Kubernetes client
// built from the given configuration.
func NewKubeRouterDefault(config *options.KubeRouterConfig) (*KubeRouter, error) {

	var clientconfig *rest.Config
	var err error
	// Use the out-of-cluster config if the API server URL or a kubeconfig has
	// been specified; otherwise use the in-cluster config.
	if len(config.Master) != 0 || len(config.Kubeconfig) != 0 {
		clientconfig, err = clientcmd.BuildConfigFromFlags(config.Master, config.Kubeconfig)
		if err != nil {
			return nil, errors.New("Failed to build configuration from CLI: " + err.Error())
		}
	} else {
		clientconfig, err = rest.InClusterConfig()
		if err != nil {
			return nil, errors.New("unable to initialize inclusterconfig: " + err.Error())
		}
	}

	clientset, err := kubernetes.NewForConfig(clientconfig)
	if err != nil {
		return nil, errors.New("Failed to create Kubernetes client: " + err.Error())
	}

	return &KubeRouter{Client: clientset, Config: config}, nil
}
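
// A minimal usage sketch, assuming the CLI entry point lives outside this
// package and that options.NewKubeRouterConfig returns a config populated
// with defaults and parsed flags (that constructor name is an assumption,
// not something defined in this file):
//
//	config := options.NewKubeRouterConfig()
//	kubeRouter, err := app.NewKubeRouterDefault(config)
//	if err != nil {
//		glog.Fatalf("Failed to create kube-router instance: %v", err)
//	}
//	if err := kubeRouter.Run(); err != nil {
//		glog.Fatalf("kube-router exited with error: %v", err)
//	}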

// CleanupConfigAndExit performs Cleanup on all three controllers.
func CleanupConfigAndExit() {
	// Zero-value controllers are instantiated only so that their Cleanup
	// methods can remove any state left behind by a previous run.
	npc := controllers.NetworkPolicyController{}
	npc.Cleanup()

	nsc := controllers.NetworkServicesController{}
	nsc.Cleanup()

	nrc := controllers.NetworkRoutingController{}
	nrc.Cleanup()
}
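
// A hypothetical invocation sketch: the actual wiring lives in the CLI entry
// point, and the CleanupConfig field/flag name below is assumed rather than
// taken from this file.
//
//	if config.CleanupConfig {
//		app.CleanupConfigAndExit()
//		return
//	}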

// startApiWatchers starts the API watchers so we get notified of changes to
// pods, endpoints, network policies, namespaces, services and nodes.
func (kr *KubeRouter) startApiWatchers() error {

	var err error

	_, err = watchers.StartPodWatcher(kr.Client, kr.Config.ConfigSyncPeriod)
	if err != nil {
		return errors.New("Failed to launch pod api watcher: " + err.Error())
	}

	_, err = watchers.StartEndpointsWatcher(kr.Client, kr.Config.ConfigSyncPeriod)
	if err != nil {
		return errors.New("Failed to launch endpoint api watcher: " + err.Error())
	}

	_, err = watchers.StartNetworkPolicyWatcher(kr.Client, kr.Config.ConfigSyncPeriod)
	if err != nil {
		return errors.New("Failed to launch network policy api watcher: " + err.Error())
	}

	_, err = watchers.StartNamespaceWatcher(kr.Client, kr.Config.ConfigSyncPeriod)
	if err != nil {
		return errors.New("Failed to launch namespace api watcher: " + err.Error())
	}

	_, err = watchers.StartServiceWatcher(kr.Client, kr.Config.ConfigSyncPeriod)
	if err != nil {
		return errors.New("Failed to launch service api watcher: " + err.Error())
	}

	_, err = watchers.StartNodeWatcher(kr.Client, kr.Config.ConfigSyncPeriod)
	if err != nil {
		return errors.New("Failed to launch nodes api watcher: " + err.Error())
	}

	return nil
}

// stopApiWatchers stops all of the API watchers started by startApiWatchers.
func (kr *KubeRouter) stopApiWatchers() {
	watchers.StopPodWatcher()
	watchers.StopEndpointsWatcher()
	watchers.StopNetworkPolicyWatcher()
	watchers.StopNamespaceWatcher()
	watchers.StopServiceWatcher()
	watchers.StopNodeWatcher()
}

// Run starts the enabled controllers and blocks until SIGINT or SIGTERM is
// received.
func (kr *KubeRouter) Run() error {

	var err error
	var wg sync.WaitGroup

	stopCh := make(chan struct{})

	err = kr.startApiWatchers()
	if err != nil {
		return errors.New("Failed to start API watchers: " + err.Error())
	}

	// At least one of the router, firewall or service proxy components must
	// be enabled; otherwise there is nothing to run.
	if !(kr.Config.RunFirewall || kr.Config.RunServiceProxy || kr.Config.RunRouter) {
		glog.Info("Router, Firewall or Service proxy functionality must be specified. Exiting!")
		os.Exit(0)
	}
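
	// Per the surrounding project docs, metrics are enabled by setting
	// --metrics-port to a valid port; the default of 0 (or an out-of-range
	// value) leaves the exporter disabled. The code below only inspects
	// kr.Config.MetricsPort, so the flag name is taken from the docs, not
	// from this file.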
	if (kr.Config.MetricsPort > 0) && (kr.Config.MetricsPort <= 65535) {
		kr.Config.MetricsEnabled = true
		mc, err := controllers.NewMetricsController(kr.Client, kr.Config)
		if err != nil {
			return errors.New("Failed to create metrics controller: " + err.Error())
		}
		wg.Add(1)
		go mc.Run(stopCh, &wg)
	} else if kr.Config.MetricsPort > 65535 {
		glog.Errorf("Metrics port must be between 1 and 65535, given port: %d", kr.Config.MetricsPort)
		kr.Config.MetricsEnabled = false
	} else {
		kr.Config.MetricsEnabled = false
	}

	if kr.Config.RunFirewall {
		npc, err := controllers.NewNetworkPolicyController(kr.Client, kr.Config)
		if err != nil {
			return errors.New("Failed to create network policy controller: " + err.Error())
		}

		wg.Add(1)
		go npc.Run(stopCh, &wg)
	}

	if kr.Config.RunRouter {
		nrc, err := controllers.NewNetworkRoutingController(kr.Client, kr.Config)
		if err != nil {
			return errors.New("Failed to create network routing controller: " + err.Error())
		}

		wg.Add(1)
		go nrc.Run(stopCh, &wg)
	}

	if kr.Config.RunServiceProxy {
		nsc, err := controllers.NewNetworkServicesController(kr.Client, kr.Config)
		if err != nil {
			return errors.New("Failed to create network services controller: " + err.Error())
		}

		wg.Add(1)
		go nsc.Run(stopCh, &wg)
	}

	// Block until we receive SIGINT or SIGTERM. signal.Notify does
	// non-blocking sends, so the channel must be buffered to avoid missing a
	// signal delivered before we start receiving.
	ch := make(chan os.Signal, 1)
	signal.Notify(ch, syscall.SIGINT, syscall.SIGTERM)
	<-ch

	glog.Info("Shutting down the controllers")
	close(stopCh)

	kr.stopApiWatchers()

	wg.Wait()
	return nil
}